Diffstat (limited to 'drivers/firmware')
98 files changed, 8713 insertions, 4330 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 75cb91055c17..b59e3041fd62 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 + depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware @@ -202,6 +203,15 @@ config INTEL_STRATIX10_RSU Say Y here if you want Intel RSU support. +config MTK_ADSP_IPC + tristate "MTK ADSP IPC Protocol driver" + depends on MTK_ADSP_MBOX + help + Say yes here to add support for the MediaTek ADSP IPC + between the host AP (Linux) and the firmware running on the ADSP. + The ADSP is present on some MediaTek processors. + Clients may use shared memory to exchange information with the ADSP. + config QCOM_SCM tristate @@ -218,12 +228,12 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT config SYSFB bool - default y - depends on X86 || EFI + select BOOT_VESA_SUPPORT config SYSFB_SIMPLEFB bool "Mark VGA/VBE/EFI FB as generic system framebuffer" - depends on SYSFB + depends on X86 || EFI + select SYSFB help Firmwares often provide initial graphics framebuffers so the BIOS, bootloader or kernel can show basic video-output during boot for diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 4e58cb474a68..28fcddcd688f 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o +obj-$(CONFIG_MTK_ADSP_IPC) += mtk-adsp-ipc.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o obj-$(CONFIG_QCOM_SCM) += qcom-scm.o @@ -31,8 +32,7 @@ obj-y += broadcom/ obj-y += cirrus/ obj-y += meson/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ -obj-$(CONFIG_EFI) += efi/ -obj-$(CONFIG_UEFI_CPER) += efi/ +obj-y += efi/ obj-y += imx/ obj-y += psci/ obj-y += smccc/ diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 641a91819088..99d439480612 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -167,7 +167,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) return valid; } -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) +struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, + const struct ffa_ops *ops) { int ret; struct device *dev; @@ -183,6 +184,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id); ffa_dev->vm_id = vm_id; + ffa_dev->ops = ops; uuid_copy(&ffa_dev->uuid, uuid); ret = device_register(&ffa_dev->dev); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 14f900047ac0..d5e86ef40b89 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -163,6 +163,7 @@ struct ffa_drv_info { struct mutex tx_lock; /* lock to protect Tx buffer */ void *rx_buffer; void *tx_buffer; + bool mem_ops_native; }; static struct ffa_drv_info *drv_info; @@ -263,18 +264,24 @@ static int ffa_rxtx_unmap(u16 vm_id) return 0; } +#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0) + /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */ static int __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, struct ffa_partition_info
*buffer, int num_partitions) { - int count; + int idx, count, flags = 0, sz, buf_sz; ffa_value_t partition_info; + if (!buffer || !num_partitions) /* Just get the count for now */ + flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY; + mutex_lock(&drv_info->rx_lock); invoke_ffa_fn((ffa_value_t){ .a0 = FFA_PARTITION_INFO_GET, .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3, + .a5 = flags, }, &partition_info); if (partition_info.a0 == FFA_ERROR) { @@ -284,8 +291,19 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, count = partition_info.a2; + if (drv_info->version > FFA_VERSION_1_0) { + buf_sz = sz = partition_info.a3; + if (sz > sizeof(*buffer)) + buf_sz = sizeof(*buffer); + } else { + /* FFA_VERSION_1_0 lacks size in the response */ + buf_sz = sz = 8; + } + if (buffer && count <= num_partitions) - memcpy(buffer, drv_info->rx_buffer, sizeof(*buffer) * count); + for (idx = 0; idx < count; idx++) + memcpy(buffer + idx, drv_info->rx_buffer + idx * sz, + buf_sz); ffa_rx_release(); @@ -398,11 +416,15 @@ static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); - if (ret.a0 != FFA_SUCCESS) + if (ret.a0 == FFA_SUCCESS) { + if (handle) + *handle = PACK_HANDLE(ret.a2, ret.a3); + } else if (ret.a0 == FFA_MEM_FRAG_RX) { + if (handle) + *handle = PACK_HANDLE(ret.a1, ret.a2); + } else { return -EOPNOTSUPP; - - if (handle) - *handle = PACK_HANDLE(ret.a2, ret.a3); + } return frag_len; } @@ -426,10 +448,12 @@ static int ffa_mem_next_frag(u64 handle, u32 frag_len) if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); - if (ret.a0 != FFA_MEM_FRAG_RX) - return -EOPNOTSUPP; + if (ret.a0 == FFA_MEM_FRAG_RX) + return ret.a3; + else if (ret.a0 == FFA_SUCCESS) + return 0; - return ret.a3; + return -EOPNOTSUPP; } static int @@ -565,6 +589,39 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags) return 0; } +static int ffa_features(u32 func_feat_id, u32 input_props, + u32 *if_props_1, u32 *if_props_2) +{ + ffa_value_t id; + + if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) { + pr_err("%s: Invalid Parameters: %x, %x", __func__, + func_feat_id, input_props); + return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS); + } + + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props, + }, &id); + + if (id.a0 == FFA_ERROR) + return ffa_to_linux_errno((int)id.a2); + + if (if_props_1) + *if_props_1 = id.a2; + if (if_props_2) + *if_props_2 = id.a3; + + return 0; +} + +static void ffa_set_up_mem_ops_native_flag(void) +{ + if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) || + !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL)) + drv_info->mem_ops_native = true; +} + static u32 ffa_api_version_get(void) { return drv_info->version; @@ -582,7 +639,7 @@ static int ffa_partition_info_get(const char *uuid_str, return -ENODEV; } - count = ffa_partition_probe(&uuid_null, &pbuf); + count = ffa_partition_probe(&uuid, &pbuf); if (count <= 0) return -ENOENT; @@ -591,11 +648,19 @@ static int ffa_partition_info_get(const char *uuid_str, return 0; } -static void ffa_mode_32bit_set(struct ffa_device *dev) +static void _ffa_mode_32bit_set(struct ffa_device *dev) { dev->mode_32bit = true; } +static void ffa_mode_32bit_set(struct ffa_device *dev) +{ + if (drv_info->version > FFA_VERSION_1_0) + return; + + _ffa_mode_32bit_set(dev); +} + static int ffa_sync_send_receive(struct ffa_device *dev, struct ffa_send_direct_data *data) { @@ -603,17 +668,15 @@ static int 
ffa_sync_send_receive(struct ffa_device *dev, dev->mode_32bit, data); } -static int -ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args) +static int ffa_memory_share(struct ffa_mem_ops_args *args) { - if (dev->mode_32bit) - return ffa_memory_ops(FFA_MEM_SHARE, args); + if (drv_info->mem_ops_native) + return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args); - return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args); + return ffa_memory_ops(FFA_MEM_SHARE, args); } -static int -ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args) +static int ffa_memory_lend(struct ffa_mem_ops_args *args) { /* Note that upon a successful MEM_LEND request the caller * must ensure that the memory region specified is not accessed @@ -622,36 +685,47 @@ ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args) * however on systems without a hypervisor the responsibility * falls to the calling kernel driver to prevent access. */ - if (dev->mode_32bit) - return ffa_memory_ops(FFA_MEM_LEND, args); + if (drv_info->mem_ops_native) + return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args); - return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args); + return ffa_memory_ops(FFA_MEM_LEND, args); } -static const struct ffa_dev_ops ffa_ops = { +static const struct ffa_info_ops ffa_drv_info_ops = { .api_version_get = ffa_api_version_get, .partition_info_get = ffa_partition_info_get, +}; + +static const struct ffa_msg_ops ffa_drv_msg_ops = { .mode_32bit_set = ffa_mode_32bit_set, .sync_send_receive = ffa_sync_send_receive, +}; + +static const struct ffa_mem_ops ffa_drv_mem_ops = { .memory_reclaim = ffa_memory_reclaim, .memory_share = ffa_memory_share, .memory_lend = ffa_memory_lend, }; -const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev) -{ - if (ffa_device_is_valid(dev)) - return &ffa_ops; - - return NULL; -} -EXPORT_SYMBOL_GPL(ffa_dev_ops_get); +static const struct ffa_ops ffa_drv_ops = { + .info_ops = &ffa_drv_info_ops, + .msg_ops = &ffa_drv_msg_ops, + .mem_ops = &ffa_drv_mem_ops, +}; void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) { int count, idx; struct ffa_partition_info *pbuf, *tpbuf; + /* + * FF-A v1.1 provides UUID for each partition as part of the discovery + * API, the discovered UUID must be populated in the device's UUID and + * there is no need to copy the same from the driver table. + */ + if (drv_info->version > FFA_VERSION_1_0) + return; + count = ffa_partition_probe(uuid, &pbuf); if (count <= 0) return; @@ -665,6 +739,7 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid) static void ffa_setup_partitions(void) { int count, idx; + uuid_t uuid; struct ffa_device *ffa_dev; struct ffa_partition_info *pbuf, *tpbuf; @@ -675,21 +750,24 @@ static void ffa_setup_partitions(void) } for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) { - /* Note that the &uuid_null parameter will require + import_uuid(&uuid, (u8 *)tpbuf->uuid); + + /* Note that if the UUID will be uuid_null, that will require * ffa_device_match() to find the UUID of this partition id - * with help of ffa_device_match_uuid(). Once the FF-A spec - * is updated to provide correct UUID here for each partition - * as part of the discovery API, we need to pass the - * discovered UUID here instead. + * with help of ffa_device_match_uuid(). FF-A v1.1 and above + * provides UUID here for each partition as part of the + * discovery API and the same is passed. 
*/ - ffa_dev = ffa_device_register(&uuid_null, tpbuf->id); + ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops); if (!ffa_dev) { pr_err("%s: failed to register partition ID 0x%x\n", __func__, tpbuf->id); continue; } - ffa_dev_set_drvdata(ffa_dev, drv_info); + if (drv_info->version > FFA_VERSION_1_0 && + !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) + _ffa_mode_32bit_set(ffa_dev); } kfree(pbuf); } @@ -747,6 +825,8 @@ static int __init ffa_init(void) ffa_setup_partitions(); + ffa_set_up_mem_ops_native_flag(); + return 0; free_pages: if (drv_info->tx_buffer) diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 3d7081e84853..a14f65444b35 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -54,6 +54,19 @@ config ARM_SCMI_TRANSPORT_MAILBOX If you want the ARM SCMI PROTOCOL stack to include support for a transport based on mailboxes, answer Y. +config ARM_SCMI_TRANSPORT_OPTEE + bool "SCMI transport based on OP-TEE service" + depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL + select ARM_SCMI_HAVE_TRANSPORT + select ARM_SCMI_HAVE_SHMEM + select ARM_SCMI_HAVE_MSG + default y + help + This enables the OP-TEE service based transport for SCMI. + + If you want the ARM SCMI PROTOCOL stack to include support for a + transport based on OP-TEE SCMI service, answer Y. + config ARM_SCMI_TRANSPORT_SMC bool "SCMI transport based on SMC" depends on HAVE_ARM_SMCCC_DISCOVERY @@ -66,6 +79,20 @@ config ARM_SCMI_TRANSPORT_SMC If you want the ARM SCMI PROTOCOL stack to include support for a transport based on SMC, answer Y. +config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE + bool "Enable atomic mode support for SCMI SMC transport" + depends on ARM_SCMI_TRANSPORT_SMC + help + Enable support of atomic operation for SCMI SMC based transport. + + If you want the SCMI SMC based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + config ARM_SCMI_TRANSPORT_VIRTIO bool "SCMI transport based on VirtIO" depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL @@ -77,6 +104,36 @@ config ARM_SCMI_TRANSPORT_VIRTIO If you want the ARM SCMI PROTOCOL stack to include support for a transport based on VirtIO, answer Y. +config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE + bool "SCMI VirtIO transport Version 1 compliance" + depends on ARM_SCMI_TRANSPORT_VIRTIO + default y + help + This enforces strict compliance with VirtIO Version 1 specification. + + If you want the ARM SCMI VirtIO transport layer to refuse to work + with Legacy VirtIO backends and instead support only VirtIO Version 1 + devices (or above), answer Y. + + If you want instead to support also old Legacy VirtIO backends (like + the ones implemented by kvmtool) and let the core Kernel VirtIO layer + take care of the needed conversions, say N. + +config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE + bool "Enable atomic mode for SCMI VirtIO transport" + depends on ARM_SCMI_TRANSPORT_VIRTIO + help + Enable support of atomic operation for SCMI VirtIO based transport. + + If you want the SCMI VirtIO based transport to operate in atomic + mode, avoiding any kind of sleeping behaviour for selected + transactions on the TX path, answer Y. 
+ + Enabling atomic mode operations allows any SCMI driver using this + transport to optionally ask for atomic SCMI transactions and operate + in atomic context too, at the price of using a number of busy-waiting + primitives all over instead. If unsure say N. + endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN @@ -92,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN will be called scmi_pm_domain. Note this may be needed early in boot before rootfs may be available. +config ARM_SCMI_POWER_CONTROL + tristate "SCMI system power control driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + help + This enables System Power control logic which binds system shutdown or + reboot actions to SCMI System Power notifications generated by SCP + firmware. + + This driver can also be built as a module. If so, the module will be + called scmi_power_control. Note this may be needed early in boot to catch + early shutdown/reboot SCMI requests. + endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 1dcf123d64ab..9ea86f8cc8f7 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -6,8 +6,17 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o +scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o +scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o +obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o + +ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) +# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame +# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling +# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE) +endif diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index f5219334fd3a..a52f084a6a87 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -36,7 +36,7 @@ struct scmi_msg_resp_base_attributes { struct scmi_msg_resp_base_discover_agent { __le32 agent_id; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; @@ -119,7 +119,7 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) ret = ph->xops->do_xfer(ph, t); if (!ret) - memcpy(vendor_id, t->rx.buf, size); + strscpy(vendor_id, t->rx.buf, size); ph->xops->xfer_put(ph, t); @@ -178,6 +178,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, __le32 *num_skip, *num_ret; u32 tot_num_ret = 0, loop_num_ret; struct device *dev = ph->dev; + struct scmi_revision_info *rev = ph->get_priv(ph); ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS, sizeof(*num_skip), 0, &t); @@ -189,6 +190,9 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, list = t->rx.buf + sizeof(*num_ret); do { + size_t real_list_sz; + u32 calc_list_sz; + /* Set the number of protocols to be skipped/already read */ *num_skip = cpu_to_le32(tot_num_ret); @@ -197,9 +201,37 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, break; loop_num_ret = le32_to_cpu(*num_ret); - if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) { - dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP"); + if (!loop_num_ret) break; + + if (loop_num_ret > rev->num_protocols - tot_num_ret) { + dev_err(dev, + "No. Returned protocols > Total protocols.\n"); + break; + } + + if (t->rx.len < (sizeof(u32) * 2)) { + dev_err(dev, "Truncated reply - rx.len:%zd\n", + t->rx.len); + ret = -EPROTO; + break; + } + + real_list_sz = t->rx.len - sizeof(u32); + calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) * + sizeof(u32); + if (calc_list_sz != real_list_sz) { + dev_warn(dev, + "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n", + real_list_sz, calc_list_sz, loop_num_ret); + /* + * Bail out if the expected list size is bigger than the + * total payload size of the received reply. 
+ */ + if (calc_list_sz > real_list_sz) { + ret = -EPROTO; + break; + } } for (loop = 0; loop < loop_num_ret; loop++) @@ -208,7 +240,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, tot_num_ret += loop_num_ret; ph->xops->reset_rx_to_maxsz(ph, t); - } while (loop_num_ret); + } while (tot_num_ret < rev->num_protocols); ph->xops->xfer_put(ph, t); @@ -244,7 +276,7 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { agent_info = t->rx.buf; - strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); + strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); @@ -343,7 +375,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) int id, ret; u8 *prot_imp; u32 version; - char name[SCMI_MAX_STR_SIZE]; + char name[SCMI_SHORT_NAME_MAX_SIZE]; struct device *dev = ph->dev; struct scmi_revision_info *rev = scmi_revision_area_get(ph); @@ -351,15 +383,19 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) if (ret) return ret; - prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL); - if (!prot_imp) - return -ENOMEM; - rev->major_ver = PROTOCOL_REV_MAJOR(version), rev->minor_ver = PROTOCOL_REV_MINOR(version); ph->set_priv(ph, rev); - scmi_base_attributes_get(ph); + ret = scmi_base_attributes_get(ph); + if (ret) + return ret; + + prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8), + GFP_KERNEL); + if (!prot_imp) + return -ENOMEM; + scmi_base_vendor_id_get(ph, false); scmi_base_vendor_id_get(ph, true); scmi_base_implementation_version_get(ph); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index f6fe723ab869..35bb70724d44 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -181,7 +181,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, return NULL; } - id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); + id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL); if (id < 0) { kfree_const(scmi_dev->name); kfree(scmi_dev); @@ -204,7 +204,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, put_dev: kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); - ida_simple_remove(&scmi_bus_id, id); + ida_free(&scmi_bus_id, id); return NULL; } @@ -212,13 +212,24 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) { kfree_const(scmi_dev->name); scmi_handle_put(scmi_dev->handle); - ida_simple_remove(&scmi_bus_id, scmi_dev->id); + ida_free(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); } +void scmi_device_link_add(struct device *consumer, struct device *supplier) +{ + struct device_link *link; + + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); + + WARN_ON(!link); +} + void scmi_set_handle(struct scmi_device *scmi_dev) { scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); + if (scmi_dev->handle) + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); } int scmi_protocol_register(const struct scmi_protocol *proto) diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 35b56c8ba0c0..96060bf90a24 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -2,13 +2,15 @@ /* * System Control and Management Interface (SCMI) Clock Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. 
*/ #include <linux/module.h> +#include <linux/limits.h> #include <linux/sort.h> -#include "common.h" +#include "protocols.h" +#include "notify.h" enum scmi_clock_protocol_cmd { CLOCK_ATTRIBUTES = 0x3, @@ -16,6 +18,9 @@ enum scmi_clock_protocol_cmd { CLOCK_RATE_SET = 0x5, CLOCK_RATE_GET = 0x6, CLOCK_CONFIG_SET = 0x7, + CLOCK_NAME_GET = 0x8, + CLOCK_RATE_NOTIFY = 0x9, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA, }; struct scmi_msg_resp_clock_protocol_attributes { @@ -27,7 +32,11 @@ struct scmi_msg_resp_clock_protocol_attributes { struct scmi_msg_resp_clock_attributes { __le32 attributes; #define CLOCK_ENABLE BIT(0) - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31)) +#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 clock_enable_latency; }; struct scmi_clock_set_config { @@ -48,7 +57,7 @@ struct scmi_msg_resp_clock_describe_rates { struct { __le32 value_low; __le32 value_high; - } rate[0]; + } rate[]; #define RATE_TO_U64(X) \ ({ \ typeof(X) x = (X); \ @@ -67,6 +76,24 @@ struct scmi_clock_set_rate { __le32 value_high; }; +struct scmi_msg_resp_set_rate_complete { + __le32 id; + __le32 rate_low; + __le32 rate_high; +}; + +struct scmi_msg_clock_rate_notify { + __le32 clk_id; + __le32 notify_enable; +}; + +struct scmi_clock_rate_notify_payld { + __le32 agent_id; + __le32 clock_id; + __le32 rate_low; + __le32 rate_high; +}; + struct clock_info { u32 version; int num_clocks; @@ -75,6 +102,11 @@ struct clock_info { struct scmi_clock_info *clk; }; +static enum scmi_clock_protocol_cmd evt_2_cmd[] = { + CLOCK_RATE_NOTIFY, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY, +}; + static int scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, struct clock_info *ci) @@ -101,9 +133,11 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, } static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, - u32 clk_id, struct scmi_clock_info *clk) + u32 clk_id, struct scmi_clock_info *clk, + u32 version) { int ret; + u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_clock_attributes *attr; @@ -116,12 +150,34 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, attr = t->rx.buf; ret = ph->xops->do_xfer(ph, t); - if (!ret) - strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); - else - clk->name[0] = '\0'; + if (!ret) { + u32 latency = 0; + attributes = le32_to_cpu(attr->attributes); + strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); + /* clock_enable_latency field is present only since SCMI v3.1 */ + if (PROTOCOL_REV_MAJOR(version) >= 0x2) + latency = le32_to_cpu(attr->clock_enable_latency); + clk->enable_latency = latency ? : U32_MAX; + } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id, + clk->name, + SCMI_MAX_STR_SIZE); + + if (SUPPORTS_RATE_CHANGED_NOTIF(attributes)) + clk->rate_changed_notifications = true; + if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) + clk->rate_change_requested_notifications = true; + } + return ret; } @@ -137,80 +193,134 @@ static int rate_cmp_func(const void *_r1, const void *_r2) return 1; } -static int -scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, - struct scmi_clock_info *clk) -{ - u64 *rate = NULL; - int ret, cnt; - bool rate_discrete = false; - u32 tot_rate_cnt = 0, rates_flag; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_msg_clock_describe_rates *clk_desc; - struct scmi_msg_resp_clock_describe_rates *rlist; +struct scmi_clk_ipriv { + struct device *dev; + u32 clk_id; + struct scmi_clock_info *clk; +}; - ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES, - sizeof(*clk_desc), 0, &t); - if (ret) - return ret; +static void iter_clk_describe_prepare_message(void *message, + const unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_clock_describe_rates *msg = message; + const struct scmi_clk_ipriv *p = priv; - clk_desc = t->tx.buf; - rlist = t->rx.buf; + msg->id = cpu_to_le32(p->clk_id); + /* Set the number of rates to be skipped/already read */ + msg->rate_index = cpu_to_le32(desc_index); +} - do { - clk_desc->id = cpu_to_le32(clk_id); - /* Set the number of rates to be skipped/already read */ - clk_desc->rate_index = cpu_to_le32(tot_rate_cnt); +static int +iter_clk_describe_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; + + flags = le32_to_cpu(r->num_rates_flags); + st->num_remaining = NUM_REMAINING(flags); + st->num_returned = NUM_RETURNED(flags); + p->clk->rate_discrete = RATE_DISCRETE(flags); + + /* Warn about out of spec replies ... */ + if (!p->clk->rate_discrete && + (st->num_returned != 3 || st->num_remaining != 0)) { + dev_warn(p->dev, + "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", + p->clk->name, st->num_returned, st->num_remaining, + st->rx_len); - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err; + /* + * A known quirk: a triplet is returned but num_returned != 3 + * Check for a safe payload size and fix. + */ + if (st->num_returned != 3 && st->num_remaining == 0 && + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { + st->num_returned = 3; + st->num_remaining = 0; + } else { + dev_err(p->dev, + "Cannot fix out-of-spec reply !\n"); + return -EPROTO; + } + } - rates_flag = le32_to_cpu(rlist->num_rates_flags); - num_remaining = NUM_REMAINING(rates_flag); - rate_discrete = RATE_DISCRETE(rates_flag); - num_returned = NUM_RETURNED(rates_flag); + return 0; +} - if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) { - dev_err(ph->dev, "No. 
of rates > MAX_NUM_RATES"); +static int +iter_clk_describe_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + int ret = 0; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; + + if (!p->clk->rate_discrete) { + switch (st->desc_index + st->loop_idx) { + case 0: + p->clk->range.min_rate = RATE_TO_U64(r->rate[0]); break; - } - - if (!rate_discrete) { - clk->range.min_rate = RATE_TO_U64(rlist->rate[0]); - clk->range.max_rate = RATE_TO_U64(rlist->rate[1]); - clk->range.step_size = RATE_TO_U64(rlist->rate[2]); - dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", - clk->range.min_rate, clk->range.max_rate, - clk->range.step_size); + case 1: + p->clk->range.max_rate = RATE_TO_U64(r->rate[1]); + break; + case 2: + p->clk->range.step_size = RATE_TO_U64(r->rate[2]); + break; + default: + ret = -EINVAL; break; } + } else { + u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx]; - rate = &clk->list.rates[tot_rate_cnt]; - for (cnt = 0; cnt < num_returned; cnt++, rate++) { - *rate = RATE_TO_U64(rlist->rate[cnt]); - dev_dbg(ph->dev, "Rate %llu Hz\n", *rate); - } + *rate = RATE_TO_U64(r->rate[st->loop_idx]); + p->clk->list.num_rates++; + } - tot_rate_cnt += num_returned; + return ret; +} - ph->xops->reset_rx_to_maxsz(ph, t); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); +static int +scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, + struct scmi_clock_info *clk) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_clk_describe_prepare_message, + .update_state = iter_clk_describe_update_state, + .process_response = iter_clk_describe_process_response, + }; + struct scmi_clk_ipriv cpriv = { + .clk_id = clk_id, + .clk = clk, + .dev = ph->dev, + }; + + iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, + CLOCK_DESCRIBE_RATES, + sizeof(struct scmi_msg_clock_describe_rates), + &cpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; - if (rate_discrete && rate) { - clk->list.num_rates = tot_rate_cnt; - sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); + if (!clk->rate_discrete) { + dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", + clk->range.min_rate, clk->range.max_rate, + clk->range.step_size); + } else if (clk->list.num_rates) { + sort(clk->list.rates, clk->list.num_rates, + sizeof(clk->list.rates[0]), rate_cmp_func, NULL); } - clk->rate_discrete = rate_discrete; - -err: - ph->xops->xfer_put(ph, t); return ret; } @@ -259,10 +369,22 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, cfg->value_low = cpu_to_le32(rate & 0xffffffff); cfg->value_high = cpu_to_le32(rate >> 32); - if (flags & CLOCK_SET_ASYNC) + if (flags & CLOCK_SET_ASYNC) { ret = ph->xops->do_xfer_with_response(ph, t); - else + if (!ret) { + struct scmi_msg_resp_set_rate_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->id) == clk_id) + dev_dbg(ph->dev, + "Clk ID %d set async to %llu\n", clk_id, + get_unaligned_le64(&resp->rate_low)); + else + ret = -EPROTO; + } + } else { ret = ph->xops->do_xfer(ph, t); + } if (ci->max_async_req) atomic_dec(&ci->cur_async_req); @@ -273,7 +395,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, static int scmi_clock_config_set(const struct scmi_protocol_handle 
*ph, u32 clk_id, - u32 config) + u32 config, bool atomic) { int ret; struct scmi_xfer *t; @@ -284,6 +406,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, if (ret) return ret; + t->hdr.poll_completion = atomic; + cfg = t->tx.buf; cfg->id = cpu_to_le32(clk_id); cfg->attributes = cpu_to_le32(config); @@ -296,12 +420,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id, static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE); + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false); } static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id) { - return scmi_clock_config_set(ph, clk_id, 0); + return scmi_clock_config_set(ph, clk_id, 0, false); +} + +static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true); +} + +static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph, + u32 clk_id) +{ + return scmi_clock_config_set(ph, clk_id, 0, true); } static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) @@ -314,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) static const struct scmi_clock_info * scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id) { + struct scmi_clock_info *clk; struct clock_info *ci = ph->get_priv(ph); - struct scmi_clock_info *clk = ci->clk + clk_id; + if (clk_id >= ci->num_clocks) + return NULL; + + clk = ci->clk + clk_id; if (!clk->name[0]) return NULL; @@ -330,6 +470,104 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { .rate_set = scmi_clock_rate_set, .enable = scmi_clock_enable, .disable = scmi_clock_disable, + .enable_atomic = scmi_clock_enable_atomic, + .disable_atomic = scmi_clock_disable_atomic, +}; + +static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph, + u32 clk_id, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_clock_rate_notify *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->clk_id = cpu_to_le32(clk_id); + notify->notify_enable = enable ? 
cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_clock_rate_notify_payld *p = payld; + struct scmi_clock_rate_notif_report *r = report; + + if (sizeof(*p) != payld_sz || + (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED && + evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED)) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->clock_id = le32_to_cpu(p->clock_id); + r->rate = get_unaligned_le64(&p->rate_low); + *src_id = r->clock_id; + + return r; +} + +static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct clock_info *ci = ph->get_priv(ph); + + if (!ci) + return -EINVAL; + + return ci->num_clocks; +} + +static const struct scmi_event clk_events[] = { + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, +}; + +static const struct scmi_event_ops clk_event_ops = { + .get_num_sources = scmi_clk_get_num_sources, + .set_notify_enabled = scmi_clk_set_notify_enabled, + .fill_custom_report = scmi_clk_fill_custom_report, +}; + +static const struct scmi_protocol_events clk_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &clk_event_ops, + .evts = clk_events, + .num_events = ARRAY_SIZE(clk_events), }; static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) @@ -338,7 +576,9 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) int clkid, ret; struct clock_info *cinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Clock Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -347,7 +587,9 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) if (!cinfo) return -ENOMEM; - scmi_clock_protocol_attributes_get(ph, cinfo); + ret = scmi_clock_protocol_attributes_get(ph, cinfo); + if (ret) + return ret; cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks, sizeof(*cinfo->clk), GFP_KERNEL); @@ -357,7 +599,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { struct scmi_clock_info *clk = cinfo->clk + clkid; - ret = scmi_clock_attributes_get(ph, clkid, clk); + ret = scmi_clock_attributes_get(ph, clkid, clk, version); if (!ret) scmi_clock_describe_rates_get(ph, clkid, clk); } @@ -371,6 +613,7 @@ static const struct scmi_protocol scmi_clock = { .owner = THIS_MODULE, .instance_init = &scmi_clock_protocol_init, .ops = &clk_proto_ops, + .events = &clk_protocol_events, }; DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock) diff --git 
a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index dea1bfbe1052..a1c0154c31c6 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -4,7 +4,7 @@ * driver common header file containing some definitions, structures * and function prototypes used in all the different SCMI protocols. * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #ifndef _SCMI_COMMON_H #define _SCMI_COMMON_H @@ -24,38 +24,9 @@ #include <asm/unaligned.h> +#include "protocols.h" #include "notify.h" -#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) -#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) -#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) -#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))) -#define MAX_PROTOCOLS_IMP 16 -#define MAX_OPPS 16 - -enum scmi_common_cmd { - PROTOCOL_VERSION = 0x0, - PROTOCOL_ATTRIBUTES = 0x1, - PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, -}; - -/** - * struct scmi_msg_resp_prot_version - Response for a message - * - * @minor_version: Minor version of the ABI that firmware supports - * @major_version: Major version of the ABI that firmware supports - * - * In general, ABI version changes follow the rule that minor version increments - * are backward compatible. Major revision changes in ABI may not be - * backward compatible. - * - * Response to a generic message with message type SCMI_MSG_VERSION - */ -struct scmi_msg_resp_prot_version { - __le16 minor_version; - __le16 major_version; -}; - #define MSG_ID_MASK GENMASK(7, 0) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_TYPE_MASK GENMASK(9, 8) @@ -80,28 +51,6 @@ struct scmi_msg_resp_prot_version { #define SCMI_PENDING_XFERS_HT_ORDER_SZ 9 /** - * struct scmi_msg_hdr - Message(Tx/Rx) header - * - * @id: The identifier of the message being sent - * @protocol_id: The identifier of the protocol used to send @id message - * @type: The SCMI type for this message - * @seq: The token to identify the message. When a message returns, the - * platform returns the whole message header unmodified including the - * token - * @status: Status of the transfer once it's complete - * @poll_completion: Indicate if the transfer needs to be polled for - * completion or interrupt mode is used - */ -struct scmi_msg_hdr { - u8 id; - u8 protocol_id; - u8 type; - u16 seq; - u32 status; - bool poll_completion; -}; - -/** * pack_scmi_header() - packs and returns 32-bit header * * @hdr: pointer to header containing all the information on message id, @@ -130,72 +79,6 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) hdr->type = MSG_XTRACT_TYPE(msg_hdr); } -/** - * struct scmi_msg - Message(Tx/Rx) structure - * - * @buf: Buffer pointer - * @len: Length of data in the Buffer - */ -struct scmi_msg { - void *buf; - size_t len; -}; - -/** - * struct scmi_xfer - Structure representing a message flow - * - * @transfer_id: Unique ID for debug & profiling purpose - * @hdr: Transmit message header - * @tx: Transmit message - * @rx: Receive message, the buffer should be pre-allocated to store - * message. If request-ACK protocol is used, we can reuse the same - * buffer for the rx path as we use for the tx path. 
- * @done: command message transmit completion event - * @async_done: pointer to delayed response message received event completion - * @pending: True for xfers added to @pending_xfers hashtable - * @node: An hlist_node reference used to store this xfer, alternatively, on - * the free list @free_xfers or in the @pending_xfers hashtable - * @users: A refcount to track the active users for this xfer. - * This is meant to protect against the possibility that, when a command - * transaction times out concurrently with the reception of a valid - * response message, the xfer could be finally put on the TX path, and - * so vanish, while on the RX path scmi_rx_callback() is still - * processing it: in such a case this refcounting will ensure that, even - * though the timed-out transaction will anyway cause the command - * request to be reported as failed by time-out, the underlying xfer - * cannot be discarded and possibly reused until the last one user on - * the RX path has released it. - * @busy: An atomic flag to ensure exclusive write access to this xfer - * @state: The current state of this transfer, with states transitions deemed - * valid being: - * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] - * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK - * (Missing synchronous response is assumed OK and ignored) - * @lock: A spinlock to protect state and busy fields. - * @priv: A pointer for transport private usage. - */ -struct scmi_xfer { - int transfer_id; - struct scmi_msg_hdr hdr; - struct scmi_msg tx; - struct scmi_msg rx; - struct completion done; - struct completion *async_done; - bool pending; - struct hlist_node node; - refcount_t users; -#define SCMI_XFER_FREE 0 -#define SCMI_XFER_BUSY 1 - atomic_t busy; -#define SCMI_XFER_SENT_OK 0 -#define SCMI_XFER_RESP_OK 1 -#define SCMI_XFER_DRESP_OK 2 - int state; - /* A lock to protect state and busy fields */ - spinlock_t lock; - void *priv; -}; - /* * An helper macro to lookup an xfer from the @pending_xfers hashtable * using the message sequence number token as a key. @@ -211,121 +94,18 @@ struct scmi_xfer { xfer_; \ }) -struct scmi_xfer_ops; - -/** - * struct scmi_protocol_handle - Reference to an initialized protocol instance - * - * @dev: A reference to the associated SCMI instance device (handle->dev). - * @xops: A reference to a struct holding refs to the core xfer operations that - * can be used by the protocol implementation to generate SCMI messages. - * @set_priv: A method to set protocol private data for this instance. - * @get_priv: A method to get protocol private data previously set. - * - * This structure represents a protocol initialized against specific SCMI - * instance and it will be used as follows: - * - as a parameter fed from the core to the protocol initialization code so - * that it can access the core xfer operations to build and generate SCMI - * messages exclusively for the specific underlying protocol instance. - * - as an opaque handle fed by an SCMI driver user when it tries to access - * this protocol through its own protocol operations. - * In this case this handle will be returned as an opaque object together - * with the related protocol operations when the SCMI driver tries to access - * the protocol. 
- */ -struct scmi_protocol_handle { - struct device *dev; - const struct scmi_xfer_ops *xops; - int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv); - void *(*get_priv)(const struct scmi_protocol_handle *ph); -}; - -/** - * struct scmi_xfer_ops - References to the core SCMI xfer operations. - * @version_get: Get this version protocol. - * @xfer_get_init: Initialize one struct xfer if any xfer slot is free. - * @reset_rx_to_maxsz: Reset rx size to max transport size. - * @do_xfer: Do the SCMI transfer. - * @do_xfer_with_response: Do the SCMI transfer waiting for a response. - * @xfer_put: Free the xfer slot. - * - * Note that all this operations expect a protocol handle as first parameter; - * they then internally use it to infer the underlying protocol number: this - * way is not possible for a protocol implementation to forge messages for - * another protocol. - */ -struct scmi_xfer_ops { - int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version); - int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id, - size_t tx_size, size_t rx_size, - struct scmi_xfer **p); - void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - int (*do_xfer)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - void (*xfer_put)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); -}; - struct scmi_revision_info * scmi_revision_area_get(const struct scmi_protocol_handle *ph); int scmi_handle_put(const struct scmi_handle *handle); +void scmi_device_link_add(struct device *consumer, struct device *supplier); struct scmi_handle *scmi_handle_get(struct device *dev); void scmi_set_handle(struct scmi_device *scmi_dev); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, u8 *prot_imp); -typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); - -/** - * struct scmi_protocol - Protocol descriptor - * @id: Protocol ID. - * @owner: Module reference if any. - * @instance_init: Mandatory protocol initialization function. - * @instance_deinit: Optional protocol de-initialization function. - * @ops: Optional reference to the operations provided by the protocol and - * exposed in scmi_protocol.h. - * @events: An optional reference to the events supported by this protocol. 
- */ -struct scmi_protocol { - const u8 id; - struct module *owner; - const scmi_prot_init_ph_fn_t instance_init; - const scmi_prot_init_ph_fn_t instance_deinit; - const void *ops; - const struct scmi_protocol_events *events; -}; - int __init scmi_bus_init(void); void __exit scmi_bus_exit(void); -#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \ - int __init scmi_##func##_register(void); \ - void __exit scmi_##func##_unregister(void) -DECLARE_SCMI_REGISTER_UNREGISTER(base); -DECLARE_SCMI_REGISTER_UNREGISTER(clock); -DECLARE_SCMI_REGISTER_UNREGISTER(perf); -DECLARE_SCMI_REGISTER_UNREGISTER(power); -DECLARE_SCMI_REGISTER_UNREGISTER(reset); -DECLARE_SCMI_REGISTER_UNREGISTER(sensors); -DECLARE_SCMI_REGISTER_UNREGISTER(voltage); -DECLARE_SCMI_REGISTER_UNREGISTER(system); - -#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ -static const struct scmi_protocol *__this_proto = &(proto); \ - \ -int __init scmi_##name##_register(void) \ -{ \ - return scmi_protocol_register(__this_proto); \ -} \ - \ -void __exit scmi_##name##_unregister(void) \ -{ \ - scmi_protocol_unregister(__this_proto); \ -} - const struct scmi_protocol *scmi_protocol_get(int protocol_id); void scmi_protocol_put(int protocol_id); @@ -338,12 +118,19 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel + * @rx_timeout_ms: The configured RX timeout in milliseconds. * @handle: Pointer to SCMI entity handle + * @no_completion_irq: Flag to indicate that this channel has no completion + * interrupt mechanism for synchronous commands. + * This can be dynamically set by transports at run-time + * inside their provided .chan_setup(). * @transport_info: Transport layer related information */ struct scmi_chan_info { struct device *dev; + unsigned int rx_timeout_ms; struct scmi_handle *handle; + bool no_completion_irq; void *transport_info; }; @@ -373,7 +160,8 @@ struct scmi_transport_ops { unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo); int (*send_message)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); - void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret); + void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer); void (*fetch_response)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer); void (*fetch_notification)(struct scmi_chan_info *cinfo, @@ -402,6 +190,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, * be pending simultaneously in the system. May be overridden by the * get_max_msg op. * @max_msg_size: Maximum size of data per message that can be handled. + * @force_polling: Flag to force this whole transport to use SCMI core polling + * mechanism instead of completion interrupts even if available. + * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures + * synchronous-command messages are atomically + * completed on .send_message: no need to poll + * actively waiting for a response. + * Used by core internally only when polling is + * selected as a waiting for reply method: i.e. + * if a completion irq was found use that anyway. + * @atomic_enabled: Flag to indicate that this transport, which is assured not + * to sleep anywhere on the TX path, can be used in atomic mode + * when requested. 
*/ struct scmi_desc { int (*transport_init)(void); @@ -410,6 +210,9 @@ struct scmi_desc { int max_rx_timeout_ms; int max_msg; int max_msg_size; + const bool force_polling; + const bool sync_cmds_completed_on_ret; + const bool atomic_enabled; }; #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX @@ -421,6 +224,9 @@ extern const struct scmi_desc scmi_smc_desc; #ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO extern const struct scmi_desc scmi_virtio_desc; #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE +extern const struct scmi_desc scmi_optee_desc; +#endif void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv); void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); @@ -429,7 +235,7 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); struct scmi_shared_mem; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b406b3f78f46..f818d00bb2c6 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -19,6 +19,7 @@ #include <linux/export.h> #include <linux/idr.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/hashtable.h> @@ -60,6 +61,11 @@ static atomic_t transfer_last_id; static DEFINE_IDR(scmi_requested_devices); static DEFINE_MUTEX(scmi_requested_devices_mtx); +/* Track globally the creation of SCMI SystemPower related devices */ +static bool scmi_syspower_registered; +/* Protect access to scmi_syspower_registered */ +static DEFINE_MUTEX(scmi_syspower_mtx); + struct scmi_requested_dev { const struct scmi_device_id *id_table; struct list_head node; @@ -128,9 +134,16 @@ struct scmi_protocol_instance { * usage. * @protocols_mtx: A mutex to protect protocols instances initialization. * @protocols_imp: List of protocols implemented, currently maximum of - * MAX_PROTOCOLS_IMP elements allocated by the base protocol + * scmi_revision_info.num_protocols elements allocated by the + * base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. + * @atomic_threshold: Optional system wide DT-configured threshold, expressed + * in microseconds, for atomic operations. + * Only SCMI synchronous commands reported by the platform + * to have an execution latency lesser-equal to the threshold + * should be considered for atomic mode operation: such + * decision is finally left up to the SCMI drivers. * @notify_priv: Pointer to private data structure specific to notifications. 
* @node: List head * @users: Number of users of this instance @@ -149,6 +162,7 @@ struct scmi_info { struct mutex protocols_mtx; u8 *protocols_imp; struct idr active_protocols; + unsigned int atomic_threshold; void *notify_priv; struct list_head node; int users; @@ -609,6 +623,25 @@ static inline void scmi_clear_channel(struct scmi_info *info, info->desc->ops->clear_channel(cinfo); } +static inline bool is_polling_required(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return cinfo->no_completion_irq || info->desc->force_polling; +} + +static inline bool is_transport_polling_capable(struct scmi_info *info) +{ + return info->desc->ops->poll_done || + info->desc->sync_cmds_completed_on_ret; +} + +static inline bool is_polling_enabled(struct scmi_chan_info *cinfo, + struct scmi_info *info) +{ + return is_polling_required(cinfo, info) && + is_transport_polling_capable(info); +} + static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { @@ -629,9 +662,15 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, unpack_scmi_header(msg_hdr, &xfer->hdr); if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -652,7 +691,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); if (IS_ERR(xfer)) { - scmi_clear_channel(info, cinfo); + if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP) + scmi_clear_channel(info, cinfo); return; } @@ -661,9 +701,16 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer->rx.len = info->desc->max_msg_size; if (priv) - xfer->priv = priv; + /* Ensure order between xfer->priv store and following ops */ + smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? + "DLYD" : "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.type); @@ -724,8 +771,6 @@ static void xfer_put(const struct scmi_protocol_handle *ph, __scmi_xfer_put(&info->tx_minfo, xfer); } -#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC) - static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer, ktime_t stop) { @@ -741,6 +786,85 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, } /** + * scmi_wait_for_message_response - A helper to group all the possible ways of + * waiting for a synchronous message response. + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being waited for. + * + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on + * configuration flags like xfer->hdr.poll_completion. + * + * Return: 0 on Success, error otherwise.
+ */ +static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct device *dev = info->dev; + int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; + + trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + timeout_ms, + xfer->hdr.poll_completion); + + if (xfer->hdr.poll_completion) { + /* + * Real polling is needed only if transport has NOT declared + * itself to support synchronous commands replies. + */ + if (!info->desc->sync_cmds_completed_on_ret) { + /* + * Poll on xfer using transport provided .poll_done(); + * assumes no completion interrupt was available. + */ + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); + + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, + xfer, stop)); + if (ktime_after(ktime_get(), stop)) { + dev_err(dev, + "timed out in resp(caller: %pS) - polling\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + if (!ret) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. + */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + + /* Trace polled replies. */ + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + } + } else { + /* And we wait for the response. */ + if (!wait_for_completion_timeout(&xfer->done, + msecs_to_jiffies(timeout_ms))) { + dev_err(dev, "timed out in resp(caller: %pS)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + } + + return ret; +} + +/** * do_xfer() - Do one transfer * * @ph: Pointer to SCMI protocol handle @@ -754,18 +878,26 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct scmi_xfer *xfer) { int ret; - int timeout; const struct scmi_protocol_instance *pi = ph_to_pi(ph); struct scmi_info *info = handle_to_scmi_info(pi->handle); struct device *dev = info->dev; struct scmi_chan_info *cinfo; - if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { + /* Check for polling request on custom command xfers at first */ + if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); return -EINVAL; } + cinfo = idr_find(&info->tx_idr, pi->proto->id); + if (unlikely(!cinfo)) + return -EINVAL; + + /* True ONLY if also supported by transport. 
*/ + if (is_polling_enabled(cinfo, info)) + xfer->hdr.poll_completion = true; + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -774,10 +906,6 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id = pi->proto->id; reinit_completion(&xfer->done); - cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); - if (unlikely(!cinfo)) - return -EINVAL; - trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); @@ -798,41 +926,16 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } - if (xfer->hdr.poll_completion) { - ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); - - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) { - unsigned long flags; - - /* - * Do not fetch_response if an out-of-order delayed - * response is being processed. - */ - spin_lock_irqsave(&xfer->lock, flags); - if (xfer->state == SCMI_XFER_SENT_OK) { - info->desc->ops->fetch_response(cinfo, xfer); - xfer->state = SCMI_XFER_RESP_OK; - } - spin_unlock_irqrestore(&xfer->lock, flags); - } else { - ret = -ETIMEDOUT; - } - } else { - /* And we wait for the response. */ - timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); - if (!wait_for_completion_timeout(&xfer->done, timeout)) { - dev_err(dev, "timed out in resp(caller: %pS)\n", - (void *)_RET_IP_); - ret = -ETIMEDOUT; - } - } + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", + xfer->hdr.seq, xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); if (info->desc->ops->mark_txdone) - info->desc->ops->mark_txdone(cinfo, ret); + info->desc->ops->mark_txdone(cinfo, ret, xfer); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, ret); @@ -858,6 +961,20 @@ static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph, * @ph: Pointer to SCMI protocol handle * @xfer: Transfer to initiate and wait for response * + * Using asynchronous commands in atomic/polling mode should be avoided since + * it could cause long busy-waiting here, so ignore polling for the delayed + * response and WARN if it was requested for this command transaction, since + * upper layers should refrain from issuing such kinds of requests. + * + * The only other option would have been to refrain from using any asynchronous + * command even if made available, when an atomic transport is detected, and + * instead forcibly use the synchronous version (something that can be easily + * attained at the protocol layer), but this would also have led to longer + * stalls of the channel for synchronous commands and possibly timeouts. + * (in other words, there is usually a good reason if a platform provides an + * asynchronous version of a command, and we should prefer to use it...just not + * when using atomic/polling mode) + * * Return: -ETIMEDOUT in case of no delayed response, if transmit error, * return corresponding error, else if all goes well, return 0. */ @@ -869,12 +986,24 @@ static int do_xfer_with_response(const struct scmi_protocol_handle *ph, xfer->async_done = &async_response; + /* + * Delayed responses should not be polled, so an async command should + * not have been used when requiring an atomic/poll context; WARN and + * instead perform a sleeping wait.
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer) + */ + WARN_ON_ONCE(xfer->hdr.poll_completion); + ret = do_xfer(ph, xfer); if (!ret) { - if (!wait_for_completion_timeout(xfer->async_done, timeout)) + if (!wait_for_completion_timeout(xfer->async_done, timeout)) { + dev_err(ph->dev, + "timed out in delayed resp(caller: %pS)\n", + (void *)_RET_IP_); ret = -ETIMEDOUT; - else if (xfer->hdr.status) + } else if (xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); + } } xfer->async_done = NULL; @@ -1001,6 +1130,332 @@ static const struct scmi_xfer_ops xfer_ops = { .xfer_put = xfer_put, }; +struct scmi_msg_resp_domain_name_get { + __le32 flags; + u8 name[SCMI_MAX_STR_SIZE]; +}; + +/** + * scmi_common_extended_name_get - Common helper to get extended resource names + * @ph: A protocol handle reference. + * @cmd_id: The specific command ID to use. + * @res_id: The specific resource ID to use. + * @name: A pointer to the preallocated area where the retrieved name will be + * stored as a NULL terminated string. + * @len: The length in bytes of the @name char array. + * + * Return: 0 on Success + */ +static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 res_id, char *name, + size_t len) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_domain_name_get *resp; + + ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), + sizeof(*resp), &t); + if (ret) + goto out; + + put_unaligned_le32(res_id, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + strscpy(name, resp->name, len); + + ph->xops->xfer_put(ph, t); +out: + if (ret) + dev_warn(ph->dev, + "Failed to get extended name - id:%u (ret:%d). Using %s\n", + res_id, ret, name); + return ret; +} + +/** + * struct scmi_iterator - Iterator descriptor + * @msg: A reference to the message TX buffer; filled by @prepare_message with + * a proper custom command payload for each multi-part command request. + * @resp: A reference to the response RX buffer; used by @update_state and + * @process_response to parse the multi-part replies. + * @t: A reference to the underlying xfer initialized and used transparently by + * the iterator internal routines. + * @ph: A reference to the associated protocol handle to be used. + * @ops: A reference to the custom provided iterator operations. + * @state: The current iterator state; used and updated in turn by the iterator's + * internal routines and by the caller-provided @scmi_iterator_ops. + * @priv: A reference to optional private data as provided by the caller and + * passed back to the @scmi_iterator_ops.
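[Editor's note] A protocol drives this iterator by supplying the three callbacks and then running it; a minimal sketch, mirroring the perf protocol usage further down in this patch (the describe_resources() wrapper itself is hypothetical):

static int describe_resources(const struct scmi_protocol_handle *ph, void *priv)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    priv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	/* Loops over the multi-part replies and finally frees the iterator */
	return ph->hops->iter_response_run(iter);
}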
+ */ +struct scmi_iterator { + void *msg; + void *resp; + struct scmi_xfer *t; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_ops *ops; + struct scmi_iterator_state state; + void *priv; +}; + +static void *scmi_iterator_init(const struct scmi_protocol_handle *ph, + struct scmi_iterator_ops *ops, + unsigned int max_resources, u8 msg_id, + size_t tx_size, void *priv) +{ + int ret; + struct scmi_iterator *i; + + i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); + if (!i) + return ERR_PTR(-ENOMEM); + + i->ph = ph; + i->ops = ops; + i->priv = priv; + + ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); + if (ret) { + devm_kfree(ph->dev, i); + return ERR_PTR(ret); + } + + i->state.max_resources = max_resources; + i->msg = i->t->tx.buf; + i->resp = i->t->rx.buf; + + return i; +} + +static int scmi_iterator_run(void *iter) +{ + int ret = -EINVAL; + struct scmi_iterator_ops *iops; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_state *st; + struct scmi_iterator *i = iter; + + if (!i || !i->ops || !i->ph) + return ret; + + iops = i->ops; + ph = i->ph; + st = &i->state; + + do { + iops->prepare_message(i->msg, st->desc_index, i->priv); + ret = ph->xops->do_xfer(ph, i->t); + if (ret) + break; + + st->rx_len = i->t->rx.len; + ret = iops->update_state(st, i->resp, i->priv); + if (ret) + break; + + if (st->num_returned > st->max_resources - st->desc_index) { + dev_err(ph->dev, + "No. of resources can't exceed %d\n", + st->max_resources); + ret = -EINVAL; + break; + } + + for (st->loop_idx = 0; st->loop_idx < st->num_returned; + st->loop_idx++) { + ret = iops->process_response(ph, i->resp, st, i->priv); + if (ret) + goto out; + } + + st->desc_index += st->num_returned; + ph->xops->reset_rx_to_maxsz(ph, i->t); + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (st->num_returned && st->num_remaining); + +out: + /* Finalize and destroy iterator */ + ph->xops->xfer_put(ph, i->t); + devm_kfree(ph->dev, i); + + return ret; +} + +struct scmi_msg_get_fc_info { + __le32 domain; + __le32 message_id; +}; + +struct scmi_msg_resp_desc_fc { + __le32 attr; +#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) +#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) + __le32 rate_limit; + __le32 chan_addr_low; + __le32 chan_addr_high; + __le32 chan_size; + __le32 db_addr_low; + __le32 db_addr_high; + __le32 db_set_lmask; + __le32 db_set_hmask; + __le32 db_preserve_lmask; + __le32 db_preserve_hmask; +}; + +static void +scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, u32 valid_size, + u32 domain, void __iomem **p_addr, + struct scmi_fc_db_info **p_db) +{ + int ret; + u32 flags; + u64 phys_addr; + u8 size; + void __iomem *addr; + struct scmi_xfer *t; + struct scmi_fc_db_info *db = NULL; + struct scmi_msg_get_fc_info *info; + struct scmi_msg_resp_desc_fc *resp; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + if (!p_addr) { + ret = -EINVAL; + goto err_out; + } + + ret = ph->xops->xfer_get_init(ph, describe_id, + sizeof(*info), sizeof(*resp), &t); + if (ret) + goto err_out; + + info = t->tx.buf; + info->domain = cpu_to_le32(domain); + info->message_id = cpu_to_le32(message_id); + + /* + * Bail out on error leaving fc_info addresses zeroed; this includes + * the case in which the requested domain/message_id does NOT support + * fastchannels at all. 
+ */ + ret = ph->xops->do_xfer(ph, t); + if (ret) + goto err_xfer; + + resp = t->rx.buf; + flags = le32_to_cpu(resp->attr); + size = le32_to_cpu(resp->chan_size); + if (size != valid_size) { + ret = -EINVAL; + goto err_xfer; + } + + phys_addr = le32_to_cpu(resp->chan_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_xfer; + } + + *p_addr = addr; + + if (p_db && SUPPORTS_DOORBELL(flags)) { + db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); + if (!db) { + ret = -ENOMEM; + goto err_db; + } + + size = 1 << DOORBELL_REG_WIDTH(flags); + phys_addr = le32_to_cpu(resp->db_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_db_mem; + } + + db->addr = addr; + db->width = size; + db->set = le32_to_cpu(resp->db_set_lmask); + db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; + db->mask = le32_to_cpu(resp->db_preserve_lmask); + db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; + + *p_db = db; + } + + ph->xops->xfer_put(ph, t); + + dev_dbg(ph->dev, + "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n", + pi->proto->id, message_id, domain); + + return; + +err_db_mem: + devm_kfree(ph->dev, db); + +err_db: + *p_addr = NULL; + +err_xfer: + ph->xops->xfer_put(ph, t); + +err_out: + dev_warn(ph->dev, + "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", + pi->proto->id, message_id, domain, ret); +} + +#define SCMI_PROTO_FC_RING_DB(w) \ +do { \ + u##w val = 0; \ + \ + if (db->mask) \ + val = ioread##w(db->addr) & db->mask; \ + iowrite##w((u##w)db->set | val, db->addr); \ +} while (0) + +static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) +{ + if (!db || !db->addr) + return; + + if (db->width == 1) + SCMI_PROTO_FC_RING_DB(8); + else if (db->width == 2) + SCMI_PROTO_FC_RING_DB(16); + else if (db->width == 4) + SCMI_PROTO_FC_RING_DB(32); + else /* db->width == 8 */ +#ifdef CONFIG_64BIT + SCMI_PROTO_FC_RING_DB(64); +#else + { + u64 val = 0; + + if (db->mask) + val = ioread64_hi_lo(db->addr) & db->mask; + iowrite64_hi_lo(db->set | val, db->addr); + } +#endif +} + +static const struct scmi_proto_helpers_ops helpers_ops = { + .extended_name_get = scmi_common_extended_name_get, + .iter_response_init = scmi_iterator_init, + .iter_response_run = scmi_iterator_run, + .fastchannel_init = scmi_common_fastchannel_init, + .fastchannel_db_ring = scmi_common_fastchannel_db_ring, +}; + /** * scmi_revision_area_get - Retrieve version memory area. 
* @@ -1061,6 +1516,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, pi->handle = handle; pi->ph.dev = handle->dev; pi->ph.xops = &xfer_ops; + pi->ph.hops = &helpers_ops; pi->ph.set_priv = scmi_set_protocol_priv; pi->ph.get_priv = scmi_get_protocol_priv; refcount_set(&pi->users, 1); @@ -1209,11 +1665,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id) { int i; struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_revision_info *rev = handle->version; if (!info->protocols_imp) return false; - for (i = 0; i < MAX_PROTOCOLS_IMP; i++) + for (i = 0; i < rev->num_protocols; i++) if (info->protocols_imp[i] == prot_id) return true; return false; @@ -1231,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res) scmi_protocol_release(dres->handle, dres->protocol_id); } +static struct scmi_protocol_instance __must_check * +scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_protocol_devres *dres; + + dres = devres_alloc(scmi_devm_release_protocol, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return ERR_PTR(-ENOMEM); + + pi = scmi_get_protocol_instance(sdev->handle, protocol_id); + if (IS_ERR(pi)) { + devres_free(dres); + return pi; + } + + dres->handle = sdev->handle; + dres->protocol_id = protocol_id; + devres_add(&sdev->dev, dres); + + return pi; +} + /** * scmi_devm_protocol_get - Devres managed get protocol operations and handle * @sdev: A reference to an scmi_device whose embedded struct device is to @@ -1254,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id, struct scmi_protocol_handle **ph) { struct scmi_protocol_instance *pi; - struct scmi_protocol_devres *dres; - struct scmi_handle *handle = sdev->handle; if (!ph) return ERR_PTR(-EINVAL); - dres = devres_alloc(scmi_devm_release_protocol, - sizeof(*dres), GFP_KERNEL); - if (!dres) - return ERR_PTR(-ENOMEM); - - pi = scmi_get_protocol_instance(handle, protocol_id); - if (IS_ERR(pi)) { - devres_free(dres); + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) return pi; - } - - dres->handle = handle; - dres->protocol_id = protocol_id; - devres_add(&sdev->dev, dres); *ph = &pi->ph; return pi->proto->ops; } +/** + * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Get hold of a protocol accounting for its usage, possibly triggering its + * initialization but without getting access to its protocol specific operations + * and handle. + * + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. 
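[Editor's note] For illustration only, a hypothetical SCMI driver that just needs the protocol held and initialized, without using its operations, could call the helper from its probe routine once it is exposed through the handle (as wired up in scmi_probe() below):

/* my_scmi_probe() is a made-up consumer; the devres helper does the rest */
static int my_scmi_probe(struct scmi_device *sdev)
{
	return sdev->handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
}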
+ * + * Return: 0 on SUCCESS + */ +static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev, + u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return PTR_ERR(pi); + + return 0; +} + static int scmi_devm_protocol_match(struct device *dev, void *res, void *data) { struct scmi_protocol_devres *dres = res; @@ -1308,6 +1804,29 @@ static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id) WARN_ON(ret); } +/** + * scmi_is_transport_atomic - Method to check if underlying transport for an + * SCMI instance is configured as atomic. + * + * @handle: A reference to the SCMI platform instance. + * @atomic_threshold: An optional return value for the system wide currently + * configured threshold for atomic operations. + * + * Return: True if transport is configured as atomic + */ +static bool scmi_is_transport_atomic(const struct scmi_handle *handle, + unsigned int *atomic_threshold) +{ + bool ret; + struct scmi_info *info = handle_to_scmi_info(handle); + + ret = info->desc->atomic_enabled && is_transport_polling_capable(info); + if (ret && atomic_threshold) + *atomic_threshold = info->atomic_threshold; + + return ret; +} + static inline struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info) { @@ -1494,11 +2013,22 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, return -ENOMEM; cinfo->dev = dev; + cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); if (ret) return ret; + if (tx && is_polling_required(cinfo, info)) { + if (is_transport_polling_capable(info)) + dev_info(dev, + "Enabled polling mode TX channel - prot_id:%d\n", + prot_id); + else + dev_warn(dev, + "Polling mode NOT supported by transport.\n"); + } + idr_alloc: ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); if (ret != prot_id) { @@ -1515,8 +2045,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret = scmi_chan_setup(info, dev, prot_id, true); - if (!ret) /* Rx is optional, hence no error check */ - scmi_chan_setup(info, dev, prot_id, false); + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } return ret; } @@ -1550,21 +2084,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, if (sdev) return sdev; + mutex_lock(&scmi_syspower_mtx); + if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { + dev_warn(info->dev, + "SCMI SystemPower protocol device must be unique !\n"); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { dev_err(&sdev->dev, "failed to setup transport\n"); scmi_device_destroy(sdev); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } + if (prot_id == SCMI_PROTOCOL_SYSTEM) + scmi_syspower_registered = true; + + mutex_unlock(&scmi_syspower_mtx); + return sdev; } @@ -1726,10 +2278,16 @@ int scmi_protocol_device_request(const struct scmi_device_id *id_table) sdev = scmi_get_protocol_device(child, info, id_table->protocol_id, id_table->name); - /* Set handle if not already set: device existed 
*/ - if (sdev && !sdev->handle) - sdev->handle = - scmi_handle_get_from_info_unlocked(info); + if (sdev) { + /* Set handle if not already set: device existed */ + if (!sdev->handle) + sdev->handle = + scmi_handle_get_from_info_unlocked(info); + /* Relink consumer and suppliers */ + if (sdev->handle) + scmi_device_link_add(&sdev->dev, + sdev->handle->dev); + } } else { dev_err(info->dev, "Failed. SCMI protocol %d not active.\n", @@ -1833,9 +2391,18 @@ static int scmi_probe(struct platform_device *pdev) handle = &info->handle; handle->dev = info->dev; handle->version = &info->version; + handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + /* System wide atomic threshold for atomic ops .. if any */ + if (!of_property_read_u32(np, "atomic-threshold-us", + &info->atomic_threshold)) + dev_info(dev, + "SCMI System wide atomic threshold set to %d us\n", + info->atomic_threshold); + handle->is_transport_atomic = scmi_is_transport_atomic; + if (desc->ops->link_supplier) { ret = desc->ops->link_supplier(dev); if (ret) @@ -1853,6 +2420,10 @@ static int scmi_probe(struct platform_device *pdev) if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); + if (info->desc->atomic_enabled && !is_transport_polling_capable(info)) + dev_err(dev, + "Transport is not polling capable. Atomic mode not supported.\n"); + /* * Trigger SCMI Base protocol initialization. * It's mandatory and won't be ever released/deinit until the @@ -1915,20 +2486,17 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) static int scmi_remove(struct platform_device *pdev) { - int ret = 0, id; + int ret, id; struct scmi_info *info = platform_get_drvdata(pdev); struct device_node *child; mutex_lock(&scmi_list_mutex); if (info->users) - ret = -EBUSY; - else - list_del(&info->node); + dev_warn(&pdev->dev, + "Still active SCMI users will be forcibly unbound.\n"); + list_del(&info->node); mutex_unlock(&scmi_list_mutex); - if (ret) - return ret; - scmi_notification_exit(&info->handle); mutex_lock(&info->protocols_mtx); @@ -1940,7 +2508,11 @@ static int scmi_remove(struct platform_device *pdev) idr_destroy(&info->active_protocols); /* Safe to free channels since no more users */ - return scmi_cleanup_txrx_channels(info); + ret = scmi_cleanup_txrx_channels(info); + if (ret) + dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); + + return 0; } static ssize_t protocol_version_show(struct device *dev, @@ -1994,6 +2566,9 @@ static const struct of_device_id scmi_of_match[] = { #ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE + { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc }, +#endif #ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, #endif @@ -2008,6 +2583,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", + .suppress_bind_attrs = true, .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, @@ -2087,6 +2663,7 @@ static int __init scmi_driver_init(void) scmi_sensors_register(); scmi_voltage_register(); scmi_system_register(); + scmi_powercap_register(); return platform_driver_register(&scmi_driver); } @@ -2103,6 +2680,7 @@ static void __exit scmi_driver_exit(void) scmi_sensors_unregister(); scmi_voltage_unregister(); 
scmi_system_unregister(); + scmi_powercap_unregister(); scmi_bus_exit(); @@ -2112,7 +2690,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index e09eb12bf421..1e40cb035044 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -36,7 +36,7 @@ static void tx_prepare(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - shmem_tx_prepare(smbox->shmem, m); + shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); } static void rx_callback(struct mbox_client *cl, void *m) @@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, return ret; } -static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret) +static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_mailbox *smbox = cinfo->transport_info; diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c new file mode 100644 index 000000000000..2a7aeab40e54 --- /dev/null +++ b/drivers/firmware/arm_scmi/optee.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019-2021 Linaro Ltd. + */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/uuid.h> +#include <uapi/linux/tee.h> + +#include "common.h" + +#define SCMI_OPTEE_MAX_MSG_SIZE 128 + +enum scmi_optee_pta_cmd { + /* + * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities + * + * [out] value[0].a: Capability bit mask (enum pta_scmi_caps) + * [out] value[0].b: Extended capabilities or 0 + */ + PTA_SCMI_CMD_CAPABILITIES = 0, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer + * + * [in] value[0].a: Channel handle + * + * Shared memory used for SCMI message/response exchange is expected + * to be already identified and bound to channel handle in both SCMI agent + * and SCMI server (OP-TEE) parts. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID). + */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1, + + /* + * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message + * + * [in] value[0].a: Channel handle + * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload) + * + * Shared memory used for SCMI message/response is an SMT buffer + * referenced by param[1]. It shall be 128 bytes large to fit the response + * payload whatever the message payload size. + * The memory uses SMT header to carry SCMI meta-data (protocol ID and + * protocol message ID).
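[Editor's note] For context, each of these PTA commands is reached through the in-kernel TEE client API; the invocation pattern for the plain SMT case, mirroring invoke_process_smt_channel() defined later in this file, looks like:

struct tee_ioctl_invoke_arg arg = {
	.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
	.session = channel->tee_session,
	.num_params = 1,
};
struct tee_param param[1] = { };

/* value[0].a carries the channel handle, as documented above */
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);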
+ */ + PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2, + + /* + * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle + * + * SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM + * + * [in] value[0].a: Channel identifier + * [out] value[0].a: Returned channel handle + * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) + */ + PTA_SCMI_CMD_GET_CHANNEL = 3, + + /* + * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG + * buffer pointed to by memref parameters + * + * [in] value[0].a: Channel handle + * [in] memref[1]: Message buffer (MSG and SCMI payload) + * [out] memref[2]: Response buffer (MSG and SCMI payload) + * + * Shared memories used for SCMI message/response are MSG buffers + * referenced by param[1] and param[2]. The MSG transport protocol + * uses a 32bit header to carry SCMI meta-data (protocol ID and + * protocol message ID) followed by the effective SCMI message + * payload. + */ + PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4, +}; + +/* + * OP-TEE SCMI service capabilities bit flags (32bit) + * + * PTA_SCMI_CAPS_SMT_HEADER + * When set, OP-TEE supports commands using the SMT header protocol (SCMI shmem) in + * shared memory buffers to carry SCMI protocol synchronisation information. + * + * PTA_SCMI_CAPS_MSG_HEADER + * When set, OP-TEE supports commands using the MSG header protocol in an OP-TEE + * shared memory to carry SCMI protocol synchronisation information and SCMI + * message payload. + */ +#define PTA_SCMI_CAPS_NONE 0 +#define PTA_SCMI_CAPS_SMT_HEADER BIT(0) +#define PTA_SCMI_CAPS_MSG_HEADER BIT(1) +#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \ + PTA_SCMI_CAPS_MSG_HEADER) + +/** + * struct scmi_optee_channel - Description of an OP-TEE SCMI channel + * + * @channel_id: OP-TEE channel ID used for this transport + * @tee_session: TEE session identifier + * @caps: OP-TEE SCMI channel capabilities + * @rx_len: Response size + * @mu: Mutex protection on channel access + * @cinfo: SCMI channel information + * @shmem: Virtual base address of the shared memory + * @req: Shared memory protocol handle for SCMI request and synchronous response + * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem + * @link: Reference in agent's channel list + */ +struct scmi_optee_channel { + u32 channel_id; + u32 tee_session; + u32 caps; + u32 rx_len; + struct mutex mu; + struct scmi_chan_info *cinfo; + union { + struct scmi_shared_mem __iomem *shmem; + struct scmi_msg_payld *msg; + } req; + struct tee_shm *tee_shm; + struct list_head link; +}; + +/** + * struct scmi_optee_agent - OP-TEE transport private data + * + * @dev: Device used for communication with TEE + * @tee_ctx: TEE context used for communication + * @caps: Supported channel capabilities + * @mu: Mutex for protection of @channel_list + * @channel_list: List of all created channels for the agent + */ +struct scmi_optee_agent { + struct device *dev; + struct tee_context *tee_ctx; + u32 caps; + struct mutex mu; + struct list_head channel_list; +}; + +/* There can be only 1 SCMI service in OP-TEE we connect to */ +static struct scmi_optee_agent *scmi_optee_private; + +/* Forward reference to scmi_optee transport initialization */ +static int scmi_optee_init(void); + +/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */ +static int open_session(struct scmi_optee_agent *agent, u32 *tee_session) +{ + struct device *dev = agent->dev; + struct tee_client_device *scmi_pta = to_tee_client_device(dev); + struct tee_ioctl_open_session_arg arg = { }; + int ret; +
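/*
 * [Editor's note] TEE_IOCTL_LOGIN_REE_KERNEL below identifies the caller as
 * the Linux kernel itself, which lets OP-TEE restrict the SCMI PTA to
 * kernel-side clients; the UUID copied into arg.uuid is the PTA UUID taken
 * from the scmi_optee_service_id[] table at the end of this file.
 */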
memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN); + arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL; + + ret = tee_client_open_session(agent->tee_ctx, &arg, NULL); + if (ret < 0 || arg.ret) { + dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + *tee_session = arg.session; + + return 0; +} + +static void close_session(struct scmi_optee_agent *agent, u32 tee_session) +{ + tee_client_close_session(agent->tee_ctx, tee_session); +} + +static int get_capabilities(struct scmi_optee_agent *agent) +{ + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + u32 caps; + u32 tee_session; + int ret; + + ret = open_session(agent, &tee_session); + if (ret) + return ret; + + arg.func = PTA_SCMI_CMD_CAPABILITIES; + arg.session = tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(agent->tee_ctx, &arg, param); + + close_session(agent, tee_session); + + if (ret < 0 || arg.ret) { + dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret); + return -EOPNOTSUPP; + } + + caps = param[0].u.value.a; + + if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n"); + return -EOPNOTSUPP; + } + + agent->caps = caps; + + return 0; +} + +static int get_channel(struct scmi_optee_channel *channel) +{ + struct device *dev = scmi_optee_private->dev; + struct tee_ioctl_invoke_arg arg = { }; + struct tee_param param[1] = { }; + unsigned int caps = 0; + int ret; + + if (channel->tee_shm) + caps = PTA_SCMI_CAPS_MSG_HEADER; + else + caps = PTA_SCMI_CAPS_SMT_HEADER; + + arg.func = PTA_SCMI_CMD_GET_CHANNEL; + arg.session = channel->tee_session; + arg.num_params = 1; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param[0].u.value.a = channel->channel_id; + param[0].u.value.b = caps; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + + if (ret || arg.ret) { + dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret); + return -EOPNOTSUPP; + } + + /* From now on use channel identifier provided by OP-TEE SCMI service */ + channel->channel_id = param[0].u.value.a; + channel->caps = caps; + + return 0; +} + +static int invoke_process_smt_channel(struct scmi_optee_channel *channel) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL, + .session = channel->tee_session, + .num_params = 1, + }; + struct tee_param param[1] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + return 0; +} + +static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL, + .session = channel->tee_session, + .num_params = 3, + }; + struct tee_param param[3] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = msg_size; + + param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; + param[2].u.memref.shm = channel->tee_shm;
+ param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; + } + + /* Save response size */ + channel->rx_len = param[2].u.memref.size; + + return 0; +} + +static int scmi_optee_link_supplier(struct device *dev) +{ + if (!scmi_optee_private) { + if (scmi_optee_init()) + dev_dbg(dev, "Optee bus not yet ready\n"); + + /* Wait for optee bus */ + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier optee device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool scmi_optee_chan_available(struct device *dev, int idx) +{ + u32 channel_id; + + return !of_property_read_u32_index(dev->of_node, "linaro,optee-channel-id", + idx, &channel_id); +} + +static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (!channel->tee_shm) + shmem_clear_channel(channel->req.shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + void *shbuf; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if (IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + shbuf = tee_shm_get_va(channel->tee_shm, 0); + memset(shbuf, 0, msg_size); + channel->req.msg = shbuf; + channel->rx_len = msg_size; + + return 0; +} + +static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + struct device_node *np; + resource_size_t size; + struct resource res; + int ret; + + np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0); + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + ret = -ENXIO; + goto out; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(dev, "Failed to get SCMI Tx shared memory\n"); + goto out; + } + + size = resource_size(&res); + + channel->req.shmem = devm_ioremap(dev, res.start, size); + if (!channel->req.shmem) { + dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); + ret = -EADDRNOTAVAIL; + goto out; + } + + ret = 0; + +out: + of_node_put(np); + + return ret; +} + +static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, + struct scmi_optee_channel *channel) +{ + if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) + return setup_static_shmem(dev, cinfo, channel); + else + return setup_dynamic_shmem(dev, channel); +} + +static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) +{ + struct scmi_optee_channel *channel; + uint32_t channel_id; + int ret; + + if (!tx) + return -ENODEV; + + channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL); + if (!channel) + return -ENOMEM; + + ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id", + 0, &channel_id); + if (ret) + return ret; + + cinfo->transport_info = channel; + channel->cinfo = cinfo; + channel->channel_id = channel_id; + mutex_init(&channel->mu); + + ret = setup_shmem(dev, cinfo, channel); + if (ret) + return ret; + + ret = open_session(scmi_optee_private, &channel->tee_session); + if (ret) + goto err_free_shm; + + ret = get_channel(channel); + if (ret) + goto err_close_sess; + + 
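/*
 * [Editor's note] Flagging the channel as having no completion IRQ (just
 * below) makes is_polling_required() true in the SCMI core; since
 * scmi_optee_desc at the end of this file also sets
 * sync_cmds_completed_on_ret, the core considers the reply already
 * available once send_message() returns and never actually busy-waits on a
 * .poll_done() callback.
 */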
/* Enable polling */ + cinfo->no_completion_irq = true; + + mutex_lock(&scmi_optee_private->mu); + list_add(&channel->link, &scmi_optee_private->channel_list); + mutex_unlock(&scmi_optee_private->mu); + + return 0; + +err_close_sess: + close_session(scmi_optee_private, channel->tee_session); +err_free_shm: + if (channel->tee_shm) + tee_shm_free(channel->tee_shm); + + return ret; +} + +static int scmi_optee_chan_free(int id, void *p, void *data) +{ + struct scmi_chan_info *cinfo = p; + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_lock(&scmi_optee_private->mu); + list_del(&channel->link); + mutex_unlock(&scmi_optee_private->mu); + + close_session(scmi_optee_private, channel->tee_session); + + if (channel->tee_shm) { + tee_shm_free(channel->tee_shm); + channel->tee_shm = NULL; + } + + cinfo->transport_info = NULL; + channel->cinfo = NULL; + + scmi_free_channel(cinfo, data, id); + + return 0; +} + +static int scmi_optee_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + int ret; + + mutex_lock(&channel->mu); + + if (channel->tee_shm) { + msg_tx_prepare(channel->req.msg, xfer); + ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); + } else { + shmem_tx_prepare(channel->req.shmem, xfer, cinfo); + ret = invoke_process_smt_channel(channel); + } + + if (ret) + mutex_unlock(&channel->mu); + + return ret; +} + +static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + if (channel->tee_shm) + msg_fetch_response(channel->req.msg, channel->rx_len, xfer); + else + shmem_fetch_response(channel->req.shmem, xfer); +} + +static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) +{ + struct scmi_optee_channel *channel = cinfo->transport_info; + + mutex_unlock(&channel->mu); +} + +static struct scmi_transport_ops scmi_optee_ops = { + .link_supplier = scmi_optee_link_supplier, + .chan_available = scmi_optee_chan_available, + .chan_setup = scmi_optee_chan_setup, + .chan_free = scmi_optee_chan_free, + .send_message = scmi_optee_send_message, + .mark_txdone = scmi_optee_mark_txdone, + .fetch_response = scmi_optee_fetch_response, + .clear_channel = scmi_optee_clear_channel, +}; + +static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + return ver->impl_id == TEE_IMPL_ID_OPTEE; +} + +static int scmi_optee_service_probe(struct device *dev) +{ + struct scmi_optee_agent *agent; + struct tee_context *tee_ctx; + int ret; + + /* Only one SCMI OP-TEE device allowed */ + if (scmi_optee_private) { + dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n"); + return -EBUSY; + } + + tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL); + if (IS_ERR(tee_ctx)) + return -ENODEV; + + agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL); + if (!agent) { + ret = -ENOMEM; + goto err; + } + + agent->dev = dev; + agent->tee_ctx = tee_ctx; + INIT_LIST_HEAD(&agent->channel_list); + mutex_init(&agent->mu); + + ret = get_capabilities(agent); + if (ret) + goto err; + + /* Ensure agent resources are all visible before scmi_optee_private is */ + smp_mb(); + scmi_optee_private = agent; + + return 0; + +err: + tee_client_close_context(tee_ctx); + + return ret; +} + +static int scmi_optee_service_remove(struct device *dev) +{ + struct scmi_optee_agent *agent = scmi_optee_private; + + if 
(!scmi_optee_private) + return -EINVAL; + + if (!list_empty(&scmi_optee_private->channel_list)) + return -EBUSY; + + /* Ensure cleared reference is visible before resources are released */ + smp_store_mb(scmi_optee_private, NULL); + + tee_client_close_context(agent->tee_ctx); + + return 0; +} + +static const struct tee_client_device_id scmi_optee_service_id[] = { + { + UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e, + 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99) + }, + { } +}; + +MODULE_DEVICE_TABLE(tee, scmi_optee_service_id); + +static struct tee_client_driver scmi_optee_driver = { + .id_table = scmi_optee_service_id, + .driver = { + .name = "scmi-optee", + .bus = &tee_bus_type, + .probe = scmi_optee_service_probe, + .remove = scmi_optee_service_remove, + }, +}; + +static int scmi_optee_init(void) +{ + return driver_register(&scmi_optee_driver.driver); +} + +static void scmi_optee_exit(void) +{ + if (scmi_optee_private) + driver_unregister(&scmi_optee_driver.driver); +} + +const struct scmi_desc scmi_optee_desc = { + .transport_exit = scmi_optee_exit, + .ops = &scmi_optee_ops, + .max_rx_timeout_ms = 30, + .max_msg = 20, + .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE, + .sync_cmds_completed_on_ret = true, +}; diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index f4cd5193b961..ecf5c4de851b 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Performance Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt @@ -10,16 +10,19 @@ #include <linux/bits.h> #include <linux/of.h> #include <linux/io.h> -#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scmi_protocol.h> #include <linux/sort.h> -#include "common.h" +#include <trace/events/scmi.h> + +#include "protocols.h" #include "notify.h" +#define MAX_OPPS 16 + enum scmi_performance_protocol_cmd { PERF_DOMAIN_ATTRIBUTES = 0x3, PERF_DESCRIBE_LEVELS = 0x4, @@ -30,6 +33,13 @@ enum scmi_performance_protocol_cmd { PERF_NOTIFY_LIMITS = 0x9, PERF_NOTIFY_LEVEL = 0xa, PERF_DESCRIBE_FASTCHANNEL = 0xb, + PERF_DOMAIN_NAME_GET = 0xc, +}; + +enum { + PERF_FC_LEVEL, + PERF_FC_LIMIT, + PERF_FC_MAX, }; struct scmi_opp { @@ -42,6 +52,7 @@ struct scmi_msg_resp_perf_attributes { __le16 num_domains; __le16 flags; #define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0)) +#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1)) __le32 stats_addr_low; __le32 stats_addr_high; __le32 stats_size; @@ -54,10 +65,11 @@ struct scmi_msg_resp_perf_domain_attributes { #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26)) __le32 rate_limit_us; __le32 sustained_freq_khz; __le32 sustained_perf_level; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_perf_describe_levels { @@ -110,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels { } opp[]; }; -struct scmi_perf_get_fc_info { - __le32 domain; - __le32 message_id; -}; - -struct scmi_msg_resp_perf_desc_fc { - __le32 attr; -#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) -#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) - __le32 rate_limit; - __le32 chan_addr_low; - __le32 chan_addr_high; - __le32 chan_size; - __le32 db_addr_low; - __le32 
db_addr_high; - __le32 db_set_lmask; - __le32 db_set_hmask; - __le32 db_preserve_lmask; - __le32 db_preserve_hmask; -}; - -struct scmi_fc_db_info { - int width; - u64 set; - u64 mask; - void __iomem *addr; -}; - -struct scmi_fc_info { - void __iomem *level_set_addr; - void __iomem *limit_set_addr; - void __iomem *level_get_addr; - void __iomem *limit_get_addr; - struct scmi_fc_db_info *level_set_db; - struct scmi_fc_db_info *limit_set_db; -}; - struct perf_dom_info { bool set_limits; bool set_perf; @@ -165,7 +140,7 @@ struct perf_dom_info { struct scmi_perf_info { u32 version; int num_domains; - bool power_scale_mw; + enum scmi_power_scale power_scale; u64 stats_addr; u32 stats_size; struct perf_dom_info *dom_info; @@ -195,7 +170,13 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, u16 flags = le16_to_cpu(attr->flags); pi->num_domains = le16_to_cpu(attr->num_domains); - pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags); + + if (POWER_SCALE_IN_MILLIWATT(flags)) + pi->power_scale = SCMI_POWER_MILLIWATTS; + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3) + if (POWER_SCALE_IN_MICROWATT(flags)) + pi->power_scale = SCMI_POWER_MICROWATTS; + pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | (u64)le32_to_cpu(attr->stats_addr_high) << 32; pi->stats_size = le32_to_cpu(attr->stats_size); @@ -207,9 +188,11 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct perf_dom_info *dom_info) + u32 domain, struct perf_dom_info *dom_info, + u32 version) { int ret; + u32 flags; struct scmi_xfer *t; struct scmi_msg_resp_perf_domain_attributes *attr; @@ -223,7 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 flags = le32_to_cpu(attr->flags); + flags = le32_to_cpu(attr->flags); dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); @@ -242,10 +225,20 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / dom_info->sustained_perf_level; - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + return ret; } @@ -256,101 +249,88 @@ static int opp_cmp_func(const void *opp1, const void *opp2) return t1->perf - t2->perf; } -static int -scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, - struct perf_dom_info *perf_dom) -{ - int ret, cnt; - u32 tot_opp_cnt = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_opp *opp; - struct scmi_msg_perf_describe_levels *dom_info; - struct scmi_msg_resp_perf_describe_levels *level_info; - - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS, - sizeof(*dom_info), 0, &t); - if (ret) - return ret; - - dom_info = t->tx.buf; - level_info = t->rx.buf; +struct scmi_perf_ipriv { + u32 domain; + struct perf_dom_info *perf_dom; +}; - do { - dom_info->domain = cpu_to_le32(domain); - /* Set the number of OPPs to be skipped/already read */ - dom_info->level_index = cpu_to_le32(tot_opp_cnt); +static void iter_perf_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_perf_describe_levels *msg = message; + const struct scmi_perf_ipriv *p = priv; - ret = ph->xops->do_xfer(ph, t); - if (ret) - break; + msg->domain = cpu_to_le32(p->domain); + /* Set the number of OPPs to be skipped/already read */ + msg->level_index = cpu_to_le32(desc_index); +} - num_returned = le16_to_cpu(level_info->num_returned); - num_remaining = le16_to_cpu(level_info->num_remaining); - if (tot_opp_cnt + num_returned > MAX_OPPS) { - dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS"); - break; - } +static int iter_perf_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_perf_describe_levels *r = response; - opp = &perf_dom->opp[tot_opp_cnt]; - for (cnt = 0; cnt < num_returned; cnt++, opp++) { - opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val); - opp->power = le32_to_cpu(level_info->opp[cnt].power); - opp->trans_latency_us = le16_to_cpu - (level_info->opp[cnt].transition_latency_us); + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); - dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", - opp->perf, opp->power, opp->trans_latency_us); - } + return 0; +} - tot_opp_cnt += num_returned; +static int +iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + struct scmi_opp *opp; + const struct scmi_msg_resp_perf_describe_levels *r = response; + struct scmi_perf_ipriv *p = priv; - ph->xops->reset_rx_to_maxsz(ph, t); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; + opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val); + opp->power = le32_to_cpu(r->opp[st->loop_idx].power); + opp->trans_latency_us = + le16_to_cpu(r->opp[st->loop_idx].transition_latency_us); + p->perf_dom->opp_count++; - perf_dom->opp_count = tot_opp_cnt; - ph->xops->xfer_put(ph, t); + dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", + opp->perf, opp->power, opp->trans_latency_us); - sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL); - return ret; + return 0; } -#define SCMI_PERF_FC_RING_DB(w) \ -do { \ - u##w val = 0; \ - \ - if (db->mask) \ - val = 
ioread##w(db->addr) & db->mask; \ - iowrite##w((u##w)db->set | val, db->addr); \ -} while (0) - -static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) +static int +scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, + struct perf_dom_info *perf_dom) { - if (!db || !db->addr) - return; + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_perf_levels_prepare_message, + .update_state = iter_perf_levels_update_state, + .process_response = iter_perf_levels_process_response, + }; + struct scmi_perf_ipriv ppriv = { + .domain = domain, + .perf_dom = perf_dom, + }; + + iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, + PERF_DESCRIBE_LEVELS, + sizeof(struct scmi_msg_perf_describe_levels), + &ppriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; - if (db->width == 1) - SCMI_PERF_FC_RING_DB(8); - else if (db->width == 2) - SCMI_PERF_FC_RING_DB(16); - else if (db->width == 4) - SCMI_PERF_FC_RING_DB(32); - else /* db->width == 8 */ -#ifdef CONFIG_64BIT - SCMI_PERF_FC_RING_DB(64); -#else - { - u64 val = 0; + if (perf_dom->opp_count) + sort(perf_dom->opp, perf_dom->opp_count, + sizeof(struct scmi_opp), opp_cmp_func, NULL); - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif + return ret; } static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, @@ -382,10 +362,17 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->limit_set_addr) { - iowrite32(max_perf, dom->fc_info->limit_set_addr); - iowrite32(min_perf, dom->fc_info->limit_set_addr + 4); - scmi_perf_fc_ring_db(dom->fc_info->limit_set_db); + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) + return -EINVAL; + + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, + domain, min_perf, max_perf); + iowrite32(max_perf, fci->set_addr); + iowrite32(min_perf, fci->set_addr + 4); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -424,9 +411,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->limit_get_addr) { - *max_perf = ioread32(dom->fc_info->limit_get_addr); - *min_perf = ioread32(dom->fc_info->limit_get_addr + 4); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + *max_perf = ioread32(fci->get_addr); + *min_perf = ioread32(fci->get_addr + 4); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, + domain, *min_perf, *max_perf); return 0; } @@ -461,9 +452,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_set_addr) { - iowrite32(level, dom->fc_info->level_set_addr); - scmi_perf_fc_ring_db(dom->fc_info->level_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, + domain, level, 0); + iowrite32(level, 
fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -498,8 +493,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_get_addr) { - *level = ioread32(dom->fc_info->level_get_addr); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { + *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, + domain, *level, 0); return 0; } @@ -528,100 +525,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, return ret; } -static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) -{ - if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) - return true; - if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8) - return true; - return false; -} - -static void -scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain, - u32 message_id, void __iomem **p_addr, - struct scmi_fc_db_info **p_db) -{ - int ret; - u32 flags; - u64 phys_addr; - u8 size; - void __iomem *addr; - struct scmi_xfer *t; - struct scmi_fc_db_info *db; - struct scmi_perf_get_fc_info *info; - struct scmi_msg_resp_perf_desc_fc *resp; - - if (!p_addr) - return; - - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL, - sizeof(*info), sizeof(*resp), &t); - if (ret) - return; - - info = t->tx.buf; - info->domain = cpu_to_le32(domain); - info->message_id = cpu_to_le32(message_id); - - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err_xfer; - - resp = t->rx.buf; - flags = le32_to_cpu(resp->attr); - size = le32_to_cpu(resp->chan_size); - if (!scmi_perf_fc_size_is_valid(message_id, size)) - goto err_xfer; - - phys_addr = le32_to_cpu(resp->chan_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - *p_addr = addr; - - if (p_db && SUPPORTS_DOORBELL(flags)) { - db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); - if (!db) - goto err_xfer; - - size = 1 << DOORBELL_REG_WIDTH(flags); - phys_addr = le32_to_cpu(resp->db_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - - db->addr = addr; - db->width = size; - db->set = le32_to_cpu(resp->db_set_lmask); - db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; - db->mask = le32_to_cpu(resp->db_preserve_lmask); - db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; - *p_db = db; - } -err_xfer: - ph->xops->xfer_put(ph, t); -} - static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, u32 domain, struct scmi_fc_info **p_fc) { struct scmi_fc_info *fc; - fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL); + fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); if (!fc) return; - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET, - &fc->level_set_addr, &fc->level_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET, - &fc->level_get_addr, NULL); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET, - &fc->limit_set_addr, &fc->limit_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET, - &fc->limit_get_addr, NULL); + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_SET, 4, domain, + &fc[PERF_FC_LEVEL].set_addr, + &fc[PERF_FC_LEVEL].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, 
+ PERF_LEVEL_GET, 4, domain, + &fc[PERF_FC_LEVEL].get_addr, NULL); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_SET, 8, domain, + &fc[PERF_FC_LIMIT].set_addr, + &fc[PERF_FC_LIMIT].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_GET, 8, domain, + &fc[PERF_FC_LIMIT].get_addr, NULL); + *p_fc = fc; } @@ -745,14 +675,15 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, dom = pi->dom_info + scmi_dev_domain_id(dev); - return dom->fc_info && dom->fc_info->level_set_addr; + return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; } -static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph) +static enum scmi_power_scale +scmi_power_scale_get(const struct scmi_protocol_handle *ph) { struct scmi_perf_info *pi = ph->get_priv(ph); - return pi->power_scale_mw; + return pi->power_scale; } static const struct scmi_perf_proto_ops perf_proto_ops = { @@ -767,7 +698,7 @@ static const struct scmi_perf_proto_ops perf_proto_ops = { .freq_get = scmi_dvfs_freq_get, .est_power_get = scmi_dvfs_est_power_get, .fast_switch_possible = scmi_fast_switch_possible, - .power_scale_mw_get = scmi_power_scale_mw_get, + .power_scale_get = scmi_power_scale_get, }; static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph, @@ -873,11 +804,13 @@ static const struct scmi_protocol_events perf_protocol_events = { static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_perf_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Performance Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -886,7 +819,9 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_perf_attributes_get(ph, pinfo); + ret = scmi_perf_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -896,7 +831,7 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct perf_dom_info *dom = pinfo->dom_info + domain; - scmi_perf_domain_attributes_get(ph, domain, dom); + scmi_perf_domain_attributes_get(ph, domain, dom, version); scmi_perf_describe_levels_get(ph, domain, dom); if (dom->perf_fastchannels) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index ad2ab080f344..356e83631664 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Power Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. 
*/ #define pr_fmt(fmt) "SCMI Notifications POWER - " fmt @@ -10,7 +10,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" enum scmi_power_protocol_cmd { @@ -18,6 +18,7 @@ enum scmi_power_protocol_cmd { POWER_STATE_SET = 0x4, POWER_STATE_GET = 0x5, POWER_STATE_NOTIFY = 0x6, + POWER_DOMAIN_NAME_GET = 0x8, }; struct scmi_msg_resp_power_attributes { @@ -33,7 +34,8 @@ struct scmi_msg_resp_power_domain_attributes { #define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31)) #define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30)) #define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29)) - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_power_set_state { @@ -97,9 +99,11 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct power_dom_info *dom_info) + u32 domain, struct power_dom_info *dom_info, + u32 version) { int ret; + u32 flags; struct scmi_xfer *t; struct scmi_msg_resp_power_domain_attributes *attr; @@ -113,15 +117,26 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 flags = le32_to_cpu(attr->flags); + flags = le32_to_cpu(attr->flags); dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } - ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) { + ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + } + return ret; } @@ -174,8 +189,9 @@ static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph) return pi->num_domains; } -static char *scmi_power_name_get(const struct scmi_protocol_handle *ph, - u32 domain) +static const char * +scmi_power_name_get(const struct scmi_protocol_handle *ph, + u32 domain) { struct scmi_power_info *pi = ph->get_priv(ph); struct power_dom_info *dom = pi->dom_info + domain; @@ -280,11 +296,13 @@ static const struct scmi_protocol_events power_protocol_events = { static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_power_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Power Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -293,7 +311,9 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_power_attributes_get(ph, pinfo); + ret = scmi_power_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -303,7 +323,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct power_dom_info *dom = pinfo->dom_info + domain; - scmi_power_domain_attributes_get(ph, domain, dom); + scmi_power_domain_attributes_get(ph, domain, dom, version); } pinfo->version = version; diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c new file mode 100644 index 000000000000..83b90bde755c --- /dev/null +++ b/drivers/firmware/arm_scmi/powercap.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Powercap Protocol + * + * Copyright (C) 2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_powercap_protocol_cmd { + POWERCAP_DOMAIN_ATTRIBUTES = 0x3, + POWERCAP_CAP_GET = 0x4, + POWERCAP_CAP_SET = 0x5, + POWERCAP_PAI_GET = 0x6, + POWERCAP_PAI_SET = 0x7, + POWERCAP_DOMAIN_NAME_GET = 0x8, + POWERCAP_MEASUREMENTS_GET = 0x9, + POWERCAP_CAP_NOTIFY = 0xa, + POWERCAP_MEASUREMENTS_NOTIFY = 0xb, + POWERCAP_DESCRIBE_FASTCHANNEL = 0xc, +}; + +enum { + POWERCAP_FC_CAP, + POWERCAP_FC_PAI, + POWERCAP_FC_MAX, +}; + +struct scmi_msg_resp_powercap_domain_attributes { + __le32 attributes; +#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30)) +#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28)) +#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27)) +#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26)) +#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25)) +#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22)) +#define POWERCAP_POWER_UNIT(x) \ + (FIELD_GET(GENMASK(24, 23), (x))) +#define SUPPORTS_POWER_UNITS_MW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x2) +#define SUPPORTS_POWER_UNITS_UW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x1) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 min_pai; + __le32 max_pai; + __le32 pai_step; + __le32 min_power_cap; + __le32 max_power_cap; + __le32 power_cap_step; + __le32 sustainable_power; + __le32 accuracy; + __le32 parent_id; +}; + +struct scmi_msg_powercap_set_cap_or_pai { + __le32 domain; + __le32 flags; +#define CAP_SET_ASYNC BIT(1) +#define CAP_SET_IGNORE_DRESP BIT(0) + __le32 value; +}; + +struct scmi_msg_resp_powercap_cap_set_complete { + __le32 domain; + __le32 power_cap; +}; + +struct scmi_msg_resp_powercap_meas_get { + __le32 power; + __le32 pai; +}; + +struct scmi_msg_powercap_notify_cap { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_powercap_notify_thresh { + __le32 domain; + __le32 notify_enable; + __le32 power_thresh_low; + __le32 power_thresh_high; +}; + +struct scmi_powercap_cap_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_cap; + __le32 pai; +}; + +struct scmi_powercap_meas_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power; +}; + +struct scmi_powercap_state { + bool meas_notif_enabled; + u64 thresholds; +#define THRESH_LOW(p, id) \ + (lower_32_bits((p)->states[(id)].thresholds)) +#define THRESH_HIGH(p, id) \ + (upper_32_bits((p)->states[(id)].thresholds)) +}; + +struct powercap_info { + u32 version; + int num_domains; + struct scmi_powercap_state *states; + struct scmi_powercap_info *powercaps; +}; + +static enum scmi_powercap_protocol_cmd evt_2_cmd[] = { + POWERCAP_CAP_NOTIFY, + POWERCAP_MEASUREMENTS_NOTIFY, +}; + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable);
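A note on the decoding idiom used throughout this new file: every capability test (the SUPPORTS_*() macros above, and the num_domains extraction that follows) is a thin wrapper around GENMASK()/FIELD_GET() from <linux/bitfield.h>. Below is a minimal user-space sketch of that unpacking, with just enough of those helpers re-implemented to compile stand-alone; the sample attributes value is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Crude stand-ins for the kernel's GENMASK()/FIELD_GET() helpers */
#define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) / ((m) & ~((m) << 1)))

#define POWERCAP_POWER_UNIT(x)            FIELD_GET(GENMASK(24, 23), (x))
#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & (1u << 22))

int main(void)
{
        uint32_t attributes = (0x2u << 23) | (1u << 22); /* mW units + FC */

        printf("unit=%u (0x2 == mW) fastchannels=%d\n",
               (unsigned int)POWERCAP_POWER_UNIT(attributes),
               !!SUPPORTS_POWERCAP_FASTCHANNELS(attributes));
        return 0;
}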
+ +static int +scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pi) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 attributes; + + attributes = get_unaligned_le32(t->rx.buf); + pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline int +scmi_powercap_validate(unsigned int min_val, unsigned int max_val, + unsigned int step_val, bool configurable) +{ + if (!min_val || !max_val) + return -EPROTO; + + if ((configurable && min_val == max_val) || + (!configurable && min_val != max_val)) + return -EPROTO; + + if (min_val != max_val && !step_val) + return -EPROTO; + + return 0; +} + +static int +scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pinfo, u32 domain) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_powercap_info *dom_info = pinfo->powercaps + domain; + struct scmi_msg_resp_powercap_domain_attributes *resp; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*resp), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(resp->attributes); + + dom_info->id = domain; + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + dom_info->async_powercap_cap_set = + SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); + dom_info->powercap_cap_config = + SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags); + dom_info->powercap_monitoring = + SUPPORTS_POWERCAP_MONITORING(flags); + dom_info->powercap_pai_config = + SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags); + dom_info->powercap_scale_mw = + SUPPORTS_POWER_UNITS_MW(flags); + dom_info->powercap_scale_uw = + SUPPORTS_POWER_UNITS_UW(flags); + dom_info->fastchannels = + SUPPORTS_POWERCAP_FASTCHANNELS(flags); + + strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE); + + dom_info->min_pai = le32_to_cpu(resp->min_pai); + dom_info->max_pai = le32_to_cpu(resp->max_pai); + dom_info->pai_step = le32_to_cpu(resp->pai_step); + ret = scmi_powercap_validate(dom_info->min_pai, + dom_info->max_pai, + dom_info->pai_step, + dom_info->powercap_pai_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent PAI config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap); + dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap); + dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step); + ret = scmi_powercap_validate(dom_info->min_power_cap, + dom_info->max_power_cap, + dom_info->power_cap_step, + dom_info->powercap_cap_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent CAP config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->sustainable_power = + le32_to_cpu(resp->sustainable_power); + dom_info->accuracy = le32_to_cpu(resp->accuracy); + + dom_info->parent_id = le32_to_cpu(resp->parent_id); + if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID && + (dom_info->parent_id >= pinfo->num_domains || + dom_info->parent_id == dom_info->id)) { + dev_err(ph->dev, + "Platform reported inconsistent parent ID for domain %d - %s\n", + dom_info->id, dom_info->name); + ret = -ENODEV; + } + } + +clean: + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name.
+ */ + if (!ret && SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const struct scmi_powercap_info * +scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains) + return NULL; + + return pi->powercaps + domain_id; +} + +static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *power_cap = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_cap || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) { + *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET, + domain_id, *power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap); +} + +static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, + const struct scmi_powercap_info *pc, + u32 power_cap, bool ignore_dresp) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(pc->id); + msg->flags = + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + msg->value = cpu_to_le32(power_cap); + + if (!pc->async_powercap_cap_set || ignore_dresp) { + ret = ph->xops->do_xfer(ph, t); + } else { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_powercap_cap_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain) == pc->id) + dev_dbg(ph->dev, + "Powercap ID %d CAP set async to %u\n", + pc->id, + get_unaligned_le32(&resp->power_cap)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_cap, + bool ignore_dresp) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_cap_config || !power_cap || + power_cap < pc->min_power_cap || + power_cap > pc->max_power_cap) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP]; + + iowrite32(power_cap, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET, + domain_id, power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp); +} + +static int scmi_powercap_xfer_pai_get(const struct 
scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *pai = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!pai || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) { + *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET, + domain_id, *pai, 0); + return 0; + } + + return scmi_powercap_xfer_pai_get(ph, domain_id, pai); +} + +static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(domain_id); + msg->flags = cpu_to_le32(0); + msg->value = cpu_to_le32(pai); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_pai_config || !pai || + pai < pc->min_pai || pai > pc->max_pai) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI]; + + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET, + domain_id, pai, 0); + iowrite32(pai, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_powercap_xfer_pai_set(ph, domain_id, pai); +}
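All of the cap/PAI accessors above share one dispatch shape: if a fastchannel was described and mapped for the operation, a single MMIO access (plus an optional doorbell ring on the set side) replaces the whole message round trip; otherwise the code falls back to the xfer-based slow path. A self-contained user-space analogue of that shape, where fc_slot and slow_path_get() are hypothetical stand-ins rather than SCMI API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one per-domain fastchannel descriptor */
struct fc_slot {
        volatile uint32_t *get_addr;    /* NULL if no fastchannel mapped */
};

/* Placeholder for the message-based round trip (e.g. POWERCAP_PAI_GET) */
static uint32_t slow_path_get(uint32_t domain_id)
{
        return 1000 + domain_id;
}

static uint32_t value_get(const struct fc_slot *fc, uint32_t domain_id)
{
        if (fc && fc->get_addr)                 /* fast path: one MMIO read */
                return *fc->get_addr;
        return slow_path_get(domain_id);        /* fall back to a message */
}

int main(void)
{
        uint32_t reg = 500;                     /* pretend shared-memory cell */
        struct fc_slot fc = { .get_addr = &reg };

        printf("fast=%u slow=%u\n", value_get(&fc, 0), value_get(NULL, 0));
        return 0;
}

On the write side the real driver additionally rings the doorbell after the iowrite32(), since a plain store into shared memory does not by itself notify the platform.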
+ +static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, + u32 *pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_powercap_meas_get *resp; + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_monitoring || !pai || !average_power) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET, + sizeof(u32), sizeof(*resp), &t); + if (ret) + return ret; + + resp = t->rx.buf; + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + *average_power = le32_to_cpu(resp->power); + *pai = le32_to_cpu(resp->pai); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_thresh_low || !power_thresh_high || + domain_id >= pi->num_domains) + return -EINVAL; + + *power_thresh_low = THRESH_LOW(pi, domain_id); + *power_thresh_high = THRESH_HIGH(pi, domain_id); + + return 0; +} + +static int +scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high) +{ + int ret = 0; + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains || + power_thresh_low > power_thresh_high) + return -EINVAL; + + /* Anything to do ? */ + if (THRESH_LOW(pi, domain_id) == power_thresh_low && + THRESH_HIGH(pi, domain_id) == power_thresh_high) + return ret; + + pi->states[domain_id].thresholds = + (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) | + FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high)); + + /* Update thresholds if notification already enabled */ + if (pi->states[domain_id].meas_notif_enabled) + ret = scmi_powercap_notify(ph, domain_id, + POWERCAP_MEASUREMENTS_NOTIFY, + true); + + return ret; +} + +static const struct scmi_powercap_proto_ops powercap_proto_ops = { + .num_domains_get = scmi_powercap_num_domains_get, + .info_get = scmi_powercap_dom_info_get, + .cap_get = scmi_powercap_cap_get, + .cap_set = scmi_powercap_cap_set, + .pai_get = scmi_powercap_pai_get, + .pai_set = scmi_powercap_pai_set, + .measurements_get = scmi_powercap_measurements_get, + .measurements_threshold_set = scmi_powercap_measurements_threshold_set, + .measurements_threshold_get = scmi_powercap_measurements_threshold_get, +}; + +static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph, + u32 domain, struct scmi_fc_info **p_fc) +{ + struct scmi_fc_info *fc; + + fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL); + if (!fc) + return; + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_SET, 4, domain, + &fc[POWERCAP_FC_CAP].set_addr, + &fc[POWERCAP_FC_CAP].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_CAP_GET, 4, domain, + &fc[POWERCAP_FC_CAP].get_addr, NULL); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_SET, 4, domain, + &fc[POWERCAP_FC_PAI].set_addr, + &fc[POWERCAP_FC_PAI].set_db); + + ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL, + POWERCAP_PAI_GET, 4, domain, + &fc[POWERCAP_FC_PAI].get_addr, NULL); + + *p_fc = fc; +} + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + + switch (message_id) { + case POWERCAP_CAP_NOTIFY: + { + struct scmi_msg_powercap_notify_cap *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0); + break; + } + case POWERCAP_MEASUREMENTS_NOTIFY: + { + u32 low, high; + struct scmi_msg_powercap_notify_thresh *notify; + + /* + * Note that we have to pick the most recently configured + * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY + * enable request and we fail, complaining, if no thresholds + * were ever set, since this is an indication the API has been + * used wrongly. + */ + ret = scmi_powercap_measurements_threshold_get(ph, domain, + &low, &high); + if (ret) + return ret; + + if (enable && !low && !high) { + dev_err(ph->dev, + "Invalid Measurements Notify thresholds: %u/%u\n", + low, high); + return -EINVAL; + } + + ret = ph->xops->xfer_get_init(ph, message_id, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = cpu_to_le32(enable ?
BIT(0) : 0); + notify->power_thresh_low = cpu_to_le32(low); + notify->power_thresh_high = cpu_to_le32(high); + break; + } + default: + return -EINVAL; + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + struct powercap_info *pi = ph->get_priv(ph); + + if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_powercap_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY) + /* + * On success save the current notification enabled state, so + * as to be able to properly update the notification thresholds + * when they are modified on a domain for which measurement + * notifications are currently enabled. + * + * This is needed because the SCMI Notification core machinery + * and API does not support passing per-notification custom + * arguments at callback registration time. + * + * Note that this can be done here with a simple flag since the + * SCMI core Notifications code takes care of keeping proper + * per-domain enables refcounting, so that this helper function + * will be called only once (for enables) when the first user + * registers a callback on this domain and once more (disable) + * when the last user de-registers its callback. + */ + pi->states[src_id].meas_notif_enabled = enable; + + return ret; +} + +static void * +scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_POWERCAP_CAP_CHANGED: + { + const struct scmi_powercap_cap_changed_notify_payld *p = payld; + struct scmi_powercap_cap_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_cap = le32_to_cpu(p->power_cap); + r->pai = le32_to_cpu(p->pai); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED: + { + const struct scmi_powercap_meas_changed_notify_payld *p = payld; + struct scmi_powercap_meas_changed_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power = le32_to_cpu(p->power); + *src_id = r->domain_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static int +scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!pi) + return -EINVAL; + + return pi->num_domains; +} + +static const struct scmi_event powercap_events[] = { + { + .id = SCMI_EVENT_POWERCAP_CAP_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_cap_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_cap_changed_report), + }, + { + .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED, + .max_payld_sz = + sizeof(struct scmi_powercap_meas_changed_notify_payld), + .max_report_sz = + sizeof(struct scmi_powercap_meas_changed_report), + }, +}; + +static const struct scmi_event_ops powercap_event_ops = { + .get_num_sources = scmi_powercap_get_num_sources, + .set_notify_enabled =
scmi_powercap_set_notify_enabled, + .fill_custom_report = scmi_powercap_fill_custom_report, +}; + +static const struct scmi_protocol_events powercap_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &powercap_event_ops, + .evts = powercap_events, + .num_events = ARRAY_SIZE(powercap_events), +}; + +static int +scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph) +{ + int domain, ret; + u32 version; + struct powercap_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Powercap Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_powercap_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->powercaps), + GFP_KERNEL); + if (!pinfo->powercaps) + return -ENOMEM; + + /* + * Note that any failure in retrieving any domain attribute leads to + * the whole Powercap protocol initialization failure: this way the + * reported Powercap domains are all assured, when accessed, to be well + * formed and correlated by sane parent-child relationship (if any). + */ + for (domain = 0; domain < pinfo->num_domains; domain++) { + ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain); + if (ret) + return ret; + + if (pinfo->powercaps[domain].fastchannels) + scmi_powercap_domain_init_fc(ph, domain, + &pinfo->powercaps[domain].fc_info); + } + + pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains, + sizeof(*pinfo->states), GFP_KERNEL); + if (!pinfo->states) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo); +} + +static const struct scmi_protocol scmi_powercap = { + .id = SCMI_PROTOCOL_POWERCAP, + .owner = THIS_MODULE, + .instance_init = &scmi_powercap_protocol_init, + .ops = &powercap_proto_ops, + .events = &powercap_protocol_events, +}; + +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h new file mode 100644 index 000000000000..2f3bf691db7c --- /dev/null +++ b/drivers/firmware/arm_scmi/protocols.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * protocols common header file containing some definitions, structures + * and function prototypes used in all the different SCMI protocols. + * + * Copyright (C) 2022 ARM Ltd. 
+ */ +#ifndef _SCMI_PROTOCOLS_H +#define _SCMI_PROTOCOLS_H + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/hashtable.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/refcount.h> +#include <linux/scmi_protocol.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) +#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) +#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) +#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))) + +enum scmi_common_cmd { + PROTOCOL_VERSION = 0x0, + PROTOCOL_ATTRIBUTES = 0x1, + PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, +}; + +/** + * struct scmi_msg_resp_prot_version - Response for a message + * + * @minor_version: Minor version of the ABI that firmware supports + * @major_version: Major version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. + * + * Response to a generic message with message type SCMI_MSG_VERSION + */ +struct scmi_msg_resp_prot_version { + __le16 minor_version; + __le16 major_version; +}; + +/** + * struct scmi_msg - Message(Tx/Rx) structure + * + * @buf: Buffer pointer + * @len: Length of data in the Buffer + */ +struct scmi_msg { + void *buf; + size_t len; +}; + +/** + * struct scmi_msg_hdr - Message(Tx/Rx) header + * + * @id: The identifier of the message being sent + * @protocol_id: The identifier of the protocol used to send @id message + * @type: The SCMI type for this message + * @seq: The token to identify the message. When a message returns, the + * platform returns the whole message header unmodified including the + * token + * @status: Status of the transfer once it's complete + * @poll_completion: Indicate if the transfer needs to be polled for + * completion or interrupt mode is used + */ +struct scmi_msg_hdr { + u8 id; + u8 protocol_id; + u8 type; + u16 seq; + u32 status; + bool poll_completion; +}; +
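A small aside on the PROTOCOL_REV_* helpers defined above: protocol versions travel as one 32-bit word holding 16-bit major/minor halves, which is exactly what each protocol init routine later feeds to its "Version %d.%d" dev_dbg(). A tiny self-contained check (the masks mirror the header's, the sample version value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same splitting as the header's PROTOCOL_REV_* macros */
#define PROTOCOL_REV_MINOR(x) ((uint16_t)((x) & 0xffff))
#define PROTOCOL_REV_MAJOR(x) ((uint16_t)(((x) >> 16) & 0xffff))

int main(void)
{
        uint32_t version = 0x00030001;  /* arbitrary sample: v3.1 */

        printf("Version %u.%u\n",
               (unsigned int)PROTOCOL_REV_MAJOR(version),
               (unsigned int)PROTOCOL_REV_MINOR(version));
        return 0;
}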
+/** + * struct scmi_xfer - Structure representing a message flow + * + * @transfer_id: Unique ID for debug & profiling purpose + * @hdr: Transmit message header + * @tx: Transmit message + * @rx: Receive message, the buffer should be pre-allocated to store + * message. If request-ACK protocol is used, we can reuse the same + * buffer for the rx path as we use for the tx path. + * @done: command message transmit completion event + * @async_done: pointer to delayed response message received event completion + * @pending: True for xfers added to @pending_xfers hashtable + * @node: An hlist_node reference used to store this xfer, alternatively, on + * the free list @free_xfers or in the @pending_xfers hashtable + * @users: A refcount to track the active users for this xfer. + * This is meant to protect against the possibility that, when a command + * transaction times out concurrently with the reception of a valid + * response message, the xfer could be finally put on the TX path, and + * so vanish, while on the RX path scmi_rx_callback() is still + * processing it: in such a case this refcounting will ensure that, even + * though the timed-out transaction will anyway cause the command + * request to be reported as failed by time-out, the underlying xfer + * cannot be discarded and possibly reused until the last user on + * the RX path has released it. + * @busy: An atomic flag to ensure exclusive write access to this xfer + * @state: The current state of this transfer, with state transitions deemed + * valid being: + * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] + * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK + * (Missing synchronous response is assumed OK and ignored) + * @lock: A spinlock to protect state and busy fields. + * @priv: A pointer for transport private usage. + */ +struct scmi_xfer { + int transfer_id; + struct scmi_msg_hdr hdr; + struct scmi_msg tx; + struct scmi_msg rx; + struct completion done; + struct completion *async_done; + bool pending; + struct hlist_node node; + refcount_t users; +#define SCMI_XFER_FREE 0 +#define SCMI_XFER_BUSY 1 + atomic_t busy; +#define SCMI_XFER_SENT_OK 0 +#define SCMI_XFER_RESP_OK 1 +#define SCMI_XFER_DRESP_OK 2 + int state; + /* A lock to protect state and busy fields */ + spinlock_t lock; + void *priv; +};
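The @state rules in the kernel-doc above are compact enough to encode directly; the following toy user-space checker restates them (constants copied from this header, the checker itself is illustrative and not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define SCMI_XFER_SENT_OK  0
#define SCMI_XFER_RESP_OK  1
#define SCMI_XFER_DRESP_OK 2

/*
 * Valid transitions per the @state kernel-doc:
 *   SENT_OK -> RESP_OK [ -> DRESP_OK ]
 *   SENT_OK -> DRESP_OK (missing synchronous response assumed OK)
 */
static bool xfer_state_transition_ok(int from, int to)
{
        return (from == SCMI_XFER_SENT_OK &&
                (to == SCMI_XFER_RESP_OK || to == SCMI_XFER_DRESP_OK)) ||
               (from == SCMI_XFER_RESP_OK && to == SCMI_XFER_DRESP_OK);
}

int main(void)
{
        printf("%d %d %d\n",  /* expect: 1 1 0 */
               xfer_state_transition_ok(SCMI_XFER_SENT_OK, SCMI_XFER_RESP_OK),
               xfer_state_transition_ok(SCMI_XFER_RESP_OK, SCMI_XFER_DRESP_OK),
               xfer_state_transition_ok(SCMI_XFER_DRESP_OK, SCMI_XFER_SENT_OK));
        return 0;
}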
+ +struct scmi_xfer_ops; +struct scmi_proto_helpers_ops; + +/** + * struct scmi_protocol_handle - Reference to an initialized protocol instance + * + * @dev: A reference to the associated SCMI instance device (handle->dev). + * @xops: A reference to a struct holding refs to the core xfer operations that + * can be used by the protocol implementation to generate SCMI messages. + * @set_priv: A method to set protocol private data for this instance. + * @get_priv: A method to get protocol private data previously set. + * + * This structure represents a protocol initialized against a specific SCMI + * instance and it will be used as follows: + * - as a parameter fed from the core to the protocol initialization code so + * that it can access the core xfer operations to build and generate SCMI + * messages exclusively for the specific underlying protocol instance. + * - as an opaque handle fed by an SCMI driver user when it tries to access + * this protocol through its own protocol operations. + * In this case this handle will be returned as an opaque object together + * with the related protocol operations when the SCMI driver tries to access + * the protocol. + */ +struct scmi_protocol_handle { + struct device *dev; + const struct scmi_xfer_ops *xops; + const struct scmi_proto_helpers_ops *hops; + int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv); + void *(*get_priv)(const struct scmi_protocol_handle *ph); +}; + +/** + * struct scmi_iterator_state - Iterator current state descriptor + * @desc_index: Starting index for the current multi-part request. + * @num_returned: Number of returned items in the last multi-part reply. + * @num_remaining: Number of remaining items in the multi-part message. + * @max_resources: Maximum acceptable number of items, configured by the caller + * depending on the underlying resources that it is querying. + * @loop_idx: The iterator loop index in the current multi-part reply. + * @rx_len: Size in bytes of the currently processed message; it can be used by + * the user of the iterator to verify a reply size. + * @priv: Optional pointer to some additional state-related private data set up + * by the caller during the iterations. + */ +struct scmi_iterator_state { + unsigned int desc_index; + unsigned int num_returned; + unsigned int num_remaining; + unsigned int max_resources; + unsigned int loop_idx; + size_t rx_len; + void *priv; +}; + +/** + * struct scmi_iterator_ops - Custom iterator operations + * @prepare_message: An operation to provide the custom logic to fill in the + * SCMI command request pointed to by @message. @desc_index is + * a reference to the next index to use in the multi-part + * request. + * @update_state: An operation to provide the custom logic to update the + * iterator state from the actual message response. + * @process_response: An operation to provide the custom logic needed to process + * each chunk of the multi-part message. + */ +struct scmi_iterator_ops { + void (*prepare_message)(void *message, unsigned int desc_index, + const void *priv); + int (*update_state)(struct scmi_iterator_state *st, + const void *response, void *priv); + int (*process_response)(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv); +}; + +struct scmi_fc_db_info { + int width; + u64 set; + u64 mask; + void __iomem *addr; +}; + +struct scmi_fc_info { + void __iomem *set_addr; + void __iomem *get_addr; + struct scmi_fc_db_info *set_db; +}; + +/** + * struct scmi_proto_helpers_ops - References to common protocol helpers + * @extended_name_get: A common helper function to retrieve extended naming + * for the specified resource using the specified command. + * Result is returned as a NULL-terminated string in the + * pre-allocated area pointed to by @name with maximum + * capacity of @len bytes. + * @iter_response_init: A common helper to initialize a generic iterator to + * parse multi-message responses: when run, the iterator + * will take care to send the initial command request as + * specified by @msg_id and @tx_size and then to parse the + * multi-part responses using the custom operations + * provided in @ops. + * @iter_response_run: A common helper to trigger the run of a previously + * initialized iterator. + * @fastchannel_init: A common helper used to initialize FC descriptors by + * gathering FC descriptions from the SCMI platform server. + * @fastchannel_db_ring: A common helper to ring a FC doorbell. + */ +struct scmi_proto_helpers_ops { + int (*extended_name_get)(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 res_id, char *name, size_t len); + void *(*iter_response_init)(const struct scmi_protocol_handle *ph, + struct scmi_iterator_ops *ops, + unsigned int max_resources, u8 msg_id, + size_t tx_size, void *priv); + int (*iter_response_run)(void *iter); + void (*fastchannel_init)(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, + u32 valid_size, u32 domain, + void __iomem **p_addr, + struct scmi_fc_db_info **p_db); + void (*fastchannel_db_ring)(struct scmi_fc_db_info *db); +};
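To make the division of labour concrete: iter_response_init()/iter_response_run() factor out the classic SCMI multi-part loop, i.e. send a request starting at desc_index, decode num_returned/num_remaining from the reply, process the returned chunk, and repeat until nothing remains. Below is a self-contained user-space model of just that loop, with fake_xfer() standing in for the prepare_message/do_xfer/update_state round trip (the item counts are made up):

#include <stdio.h>

/* Fake platform: 10 items total, at most 4 returned per request */
#define TOTAL_ITEMS 10U
#define CHUNK       4U

static void fake_xfer(unsigned int desc_index,
                      unsigned int *num_returned, unsigned int *num_remaining)
{
        unsigned int left = TOTAL_ITEMS - desc_index;

        *num_returned = left < CHUNK ? left : CHUNK;
        *num_remaining = left - *num_returned;
}

int main(void)
{
        unsigned int desc_index = 0, returned, remaining;

        do {
                /* prepare_message + do_xfer + update_state */
                fake_xfer(desc_index, &returned, &remaining);
                for (unsigned int i = 0; i < returned; i++)
                        printf("item %u\n", desc_index + i); /* process_response */
                desc_index += returned;
        } while (remaining);
        return 0;
}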
+ +/** + * struct scmi_xfer_ops - References to the core SCMI xfer operations. + * @version_get: Get this protocol version. + * @xfer_get_init: Initialize one struct xfer if any xfer slot is free. + * @reset_rx_to_maxsz: Reset rx size to max transport size. + * @do_xfer: Do the SCMI transfer. + * @do_xfer_with_response: Do the SCMI transfer waiting for a response. + * @xfer_put: Free the xfer slot. + * + * Note that all these operations expect a protocol handle as their first + * parameter; they then internally use it to infer the underlying protocol + * number: this way it is not possible for a protocol implementation to forge + * messages for another protocol. + */ +struct scmi_xfer_ops { + int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version); + int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id, + size_t tx_size, size_t rx_size, + struct scmi_xfer **p); + void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + int (*do_xfer)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + void (*xfer_put)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); +}; + +typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); + +/** + * struct scmi_protocol - Protocol descriptor + * @id: Protocol ID. + * @owner: Module reference if any. + * @instance_init: Mandatory protocol initialization function. + * @instance_deinit: Optional protocol de-initialization function. + * @ops: Optional reference to the operations provided by the protocol and + * exposed in scmi_protocol.h. + * @events: An optional reference to the events supported by this protocol. + */ +struct scmi_protocol { + const u8 id; + struct module *owner; + const scmi_prot_init_ph_fn_t instance_init; + const scmi_prot_init_ph_fn_t instance_deinit; + const void *ops; + const struct scmi_protocol_events *events; +}; + +#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ +static const struct scmi_protocol *__this_proto = &(proto); \ + \ +int __init scmi_##name##_register(void) \ +{ \ + return scmi_protocol_register(__this_proto); \ +} \ + \ +void __exit scmi_##name##_unregister(void) \ +{ \ + scmi_protocol_unregister(__this_proto); \ +} + +#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \ + int __init scmi_##func##_register(void); \ + void __exit scmi_##func##_unregister(void) +DECLARE_SCMI_REGISTER_UNREGISTER(base); +DECLARE_SCMI_REGISTER_UNREGISTER(clock); +DECLARE_SCMI_REGISTER_UNREGISTER(perf); +DECLARE_SCMI_REGISTER_UNREGISTER(power); +DECLARE_SCMI_REGISTER_UNREGISTER(reset); +DECLARE_SCMI_REGISTER_UNREGISTER(sensors); +DECLARE_SCMI_REGISTER_UNREGISTER(voltage); +DECLARE_SCMI_REGISTER_UNREGISTER(system); +DECLARE_SCMI_REGISTER_UNREGISTER(powercap); + +#endif /* _SCMI_PROTOCOLS_H */ diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index 9bf2478ec6d1..e9afa8cab730 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Reset Protocol * - * Copyright (C) 2019-2021 ARM Ltd. + * Copyright (C) 2019-2022 ARM Ltd.
*/ #define pr_fmt(fmt) "SCMI Notifications RESET - " fmt @@ -10,13 +10,14 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 0x3, RESET = 0x4, RESET_NOTIFY = 0x5, + RESET_DOMAIN_NAME_GET = 0x6, }; #define NUM_RESET_DOMAIN_MASK 0xffff @@ -26,8 +27,9 @@ struct scmi_msg_resp_reset_domain_attributes { __le32 attributes; #define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31)) #define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) __le32 latency; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_reset_domain_reset { @@ -89,9 +91,11 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct reset_dom_info *dom_info) + u32 domain, struct reset_dom_info *dom_info, + u32 version) { int ret; + u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_reset_domain_attributes *attr; @@ -105,17 +109,27 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 attributes = le32_to_cpu(attr->attributes); + attributes = le32_to_cpu(attr->attributes); dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes); dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes); dom_info->latency_us = le32_to_cpu(attr->latency); if (dom_info->latency_us == U32_MAX) dom_info->latency_us = 0; - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + return ret; } @@ -126,8 +140,8 @@ static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph) return pi->num_domains; } -static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph, - u32 domain) +static const char * +scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain) { struct scmi_reset_info *pi = ph->get_priv(ph); @@ -152,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, struct scmi_xfer *t; struct scmi_msg_reset_domain_reset *dom; struct scmi_reset_info *pi = ph->get_priv(ph); - struct reset_dom_info *rdom = pi->dom_info + domain; + struct reset_dom_info *rdom; + + if (domain >= pi->num_domains) + return -EINVAL; - if (rdom->async_reset) + rdom = pi->dom_info + domain; + if (rdom->async_reset && flags & AUTONOMOUS_RESET) flags |= ASYNCHRONOUS_RESET; ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t); @@ -166,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, dom->flags = cpu_to_le32(flags); dom->reset_state = cpu_to_le32(state); - if (rdom->async_reset) + if (flags & ASYNCHRONOUS_RESET) ret = ph->xops->do_xfer_with_response(ph, t); else ret = ph->xops->do_xfer(ph, t); @@ -293,11 +311,13 @@ static const struct scmi_protocol_events reset_protocol_events = { static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_reset_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Reset Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -306,7 +326,9 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_reset_attributes_get(ph, pinfo); + ret = scmi_reset_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -316,7 +338,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct reset_dom_info *dom = pinfo->dom_info + domain; - scmi_reset_domain_attributes_get(ph, domain, dom); + scmi_reset_domain_attributes_get(ph, domain, dom, version); } pinfo->version = version; diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 581d34c95769..0e05a79de82d 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -8,7 +8,6 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/pm_clock.h> #include <linux/pm_domain.h> #include <linux/scmi_protocol.h> @@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain) return scmi_pd_power(domain, false); } -static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev) -{ - int ret; - - ret = pm_clk_create(dev); - if (ret) - return ret; - - ret = of_pm_clk_add_clks(dev); - if (ret >= 0) - return 0; - - pm_clk_destroy(dev); - return ret; -} - -static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev) -{ - pm_clk_destroy(dev); -} - static int scmi_pm_domain_probe(struct scmi_device *sdev) { int num_domains, 
i; @@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.name = scmi_pd->name; scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; - scmi_pd->genpd.attach_dev = scmi_pd_attach_dev; - scmi_pd->genpd.detach_dev = scmi_pd_detach_dev; - scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK | - GENPD_FLAG_ACTIVE_WAKEUP; pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); @@ -138,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd_data->domains = domains; scmi_pd_data->num_domains = num_domains; + dev_set_drvdata(dev, scmi_pd_data); + return of_genpd_add_provider_onecell(np, scmi_pd_data); } +static void scmi_pm_domain_remove(struct scmi_device *sdev) +{ + int i; + struct genpd_onecell_data *scmi_pd_data; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + + of_genpd_del_provider(np); + + scmi_pd_data = dev_get_drvdata(dev); + for (i = 0; i < scmi_pd_data->num_domains; i++) { + if (!scmi_pd_data->domains[i]) + continue; + pm_genpd_remove(scmi_pd_data->domains[i]); + } +} + static const struct scmi_device_id scmi_id_table[] = { { SCMI_PROTOCOL_POWER, "genpd" }, { }, @@ -150,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table); static struct scmi_driver scmi_power_domain_driver = { .name = "scmi-power-domain", .probe = scmi_pm_domain_probe, + .remove = scmi_pm_domain_remove, .id_table = scmi_id_table, }; module_scmi_driver(scmi_power_domain_driver); diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c new file mode 100644 index 000000000000..6eb7d2a4b6b1 --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_power_control.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Generic SystemPower Control driver. + * + * Copyright (C) 2020-2022 ARM Ltd. + */ +/* + * In order to handle platform-originated SCMI SystemPower requests (like + * shutdowns or cold/warm resets) we register an SCMI Notification notifier + * block to react when such SCMI SystemPower events are emitted by the + * platform. + * + * Once such a notification is received, we act accordingly to perform the + * required system transition depending on the kind of request. + * + * Graceful requests are routed to userspace through the same API methods + * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus + * events. + * + * Direct forceful requests are not supported since they are not meant to be + * sent by the SCMI platform to an OSPM like Linux. + * + * Additionally, graceful request notifications can carry an optional timeout + * field stating the maximum amount of time allowed by the platform for + * completion, after which they are converted to forceful ones: the assumption + * here is that even graceful requests can be upper-bound by a maximum final + * timeout strictly enforced by the platform itself, which can ultimately cut + * the power off at will anytime; in order to avoid such an extreme scenario, + * we track progress of graceful requests through the means of a reboot + * notifier converting timed-out graceful requests to forceful ones, so at + * least we try to perform a clean sync and shutdown/restart before the power + * is cut. + * + * Given the peculiar nature of the SCMI SystemPower protocol, which is in + * charge of triggering system-wide shutdown/reboot events, there should be + * only one SCMI platform actively emitting SystemPower events. + * For this reason the SCMI core takes care to enforce the creation of one + * single unique device associated with the SCMI System Power protocol; no + * matter how many SCMI platforms are defined on the system, only one can be + * designated to support System Power: as a consequence this driver will never + * be probed more than once. + * + * For similar reasons, as soon as the first valid SystemPower notification is + * received by this driver and the shutdown/reboot is started, any further + * notification possibly emitted by the platform will be ignored. + */
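The timeout-conversion policy described in the comment above reduces to simple arithmetic: trust the platform-advertised budget but keep a safety margin, since the platform may cut power when its own timer expires. The driver later computes this margin as mult_frac(timeout_ms, 3, 4); a stand-alone rendition of the same computation (the sample budget is arbitrary):

#include <stdio.h>

/* Wait only up to 75% of the platform-advertised timeout before forcing */
static unsigned int adjusted_timeout_ms(unsigned int timeout_ms)
{
        /* same result as the kernel's mult_frac(timeout_ms, 3, 4) */
        return (unsigned int)((unsigned long long)timeout_ms * 3 / 4);
}

int main(void)
{
        unsigned int advertised = 10000;  /* arbitrary: platform allows 10s */

        printf("advertised=%ums force-after=%ums\n",
               advertised, adjusted_timeout_ms(advertised));
        return 0;
}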
+ +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/reboot.h> +#include <linux/scmi_protocol.h> +#include <linux/slab.h> +#include <linux/time64.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#ifndef MODULE +#include <linux/fs.h> +#endif + +enum scmi_syspower_state { + SCMI_SYSPOWER_IDLE, + SCMI_SYSPOWER_IN_PROGRESS, + SCMI_SYSPOWER_REBOOTING +}; + +/** + * struct scmi_syspower_conf - Common configuration + * + * @dev: A reference device + * @state: Current SystemPower state + * @state_mtx: @state related mutex + * @required_transition: The requested transition as described in the received + * SCMI SystemPower notification + * @userspace_nb: The notifier_block registered against the SCMI SystemPower + * notification to start the needed userspace interactions. + * @reboot_nb: A notifier_block optionally used to track reboot progress + * @forceful_work: A worker used to trigger a forceful transition once a + * graceful one has timed out. + */ +struct scmi_syspower_conf { + struct device *dev; + enum scmi_syspower_state state; + /* Protect access to state */ + struct mutex state_mtx; + enum scmi_system_events required_transition; + + struct notifier_block userspace_nb; + struct notifier_block reboot_nb; + + struct delayed_work forceful_work; +}; + +#define userspace_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, userspace_nb) + +#define reboot_nb_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, reboot_nb) + +#define dwork_to_sconf(x) \ + container_of(x, struct scmi_syspower_conf, forceful_work)
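The three *_to_sconf() helpers above are plain container_of() wrappers: given a pointer to one of the embedded notifier_block/delayed_work members, they recover the enclosing scmi_syspower_conf. A self-contained illustration of the idiom, using trimmed-down stand-in types instead of the real kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };        /* trimmed stand-in */

struct syspower_conf {
        int state;
        struct notifier_block userspace_nb;     /* embedded member */
};

#define userspace_nb_to_sconf(x) \
        container_of(x, struct syspower_conf, userspace_nb)

static int notifier_cb(struct notifier_block *nb)
{
        struct syspower_conf *sc = userspace_nb_to_sconf(nb);

        return sc->state;                       /* back at the owner */
}

int main(void)
{
        struct syspower_conf sc = { .state = 42 };

        printf("state via notifier: %d\n", notifier_cb(&sc.userspace_nb));
        return 0;
}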
+ +/** + * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful + * system transition + * @nb: Reference to the related notifier block + * @reason: The reason for the ongoing reboot + * @__unused: The cmd being executed on a restart request (unused) + * + * When an ongoing system transition is detected, compatible with the one + * requested by SCMI, cancel the delayed work. + * + * Return: NOTIFY_OK in any case + */ +static int scmi_reboot_notifier(struct notifier_block *nb, + unsigned long reason, void *__unused) +{ + struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb); + + mutex_lock(&sc->state_mtx); + switch (reason) { + case SYS_HALT: + case SYS_POWER_OFF: + if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + case SYS_RESTART: + if (sc->required_transition == SCMI_SYSTEM_COLDRESET || + sc->required_transition == SCMI_SYSTEM_WARMRESET) + sc->state = SCMI_SYSPOWER_REBOOTING; + break; + default: + break; + } + + if (sc->state == SCMI_SYSPOWER_REBOOTING) { + dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n"); + cancel_delayed_work_sync(&sc->forceful_work); + } + mutex_unlock(&sc->state_mtx); + + return NOTIFY_OK; +}
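scmi_reboot_notifier() above is, in effect, a small compatibility table between the kernel's reboot reason and the SCMI-requested transition. The same table modeled stand-alone (the enum values are local stand-ins, not the kernel's SYS_* constants):

#include <stdbool.h>
#include <stdio.h>

enum sys_reason { SYS_HALT, SYS_POWER_OFF, SYS_RESTART };  /* stand-ins */
enum scmi_req { SHUTDOWN, COLDRESET, WARMRESET };

/* True when the ongoing reboot satisfies what SCMI asked for */
static bool transition_matches(enum sys_reason reason, enum scmi_req req)
{
        switch (reason) {
        case SYS_HALT:
        case SYS_POWER_OFF:
                return req == SHUTDOWN;
        case SYS_RESTART:
                return req == COLDRESET || req == WARMRESET;
        }
        return false;
}

int main(void)
{
        printf("%d %d\n",  /* expect: 1 0 */
               transition_matches(SYS_POWER_OFF, SHUTDOWN),
               transition_matches(SYS_RESTART, SHUTDOWN));
        return 0;
}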
+ +/** + * scmi_request_forceful_transition - Request forceful SystemPower transition + * @sc: A reference to the configuration data + * + * Initiates the required SystemPower transition without involving userspace: + * just trigger the action at the kernel level after issuing an emergency + * sync (if possible at all). + */ +static inline void +scmi_request_forceful_transition(struct scmi_syspower_conf *sc) +{ + dev_dbg(sc->dev, "Serving forceful request:%d\n", + sc->required_transition); + +#ifndef MODULE + emergency_sync(); +#endif + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + kernel_power_off(); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + kernel_restart(NULL); + break; + default: + break; + } +} + +static void scmi_forceful_work_func(struct work_struct *work) +{ + struct scmi_syspower_conf *sc; + struct delayed_work *dwork; + + if (system_state > SYSTEM_RUNNING) + return; + + dwork = to_delayed_work(work); + sc = dwork_to_sconf(dwork); + + dev_dbg(sc->dev, "Graceful request timed out...forcing !\n"); + mutex_lock(&sc->state_mtx); + /* avoid deadlock by unregistering reboot notifier first */ + unregister_reboot_notifier(&sc->reboot_nb); + if (sc->state == SCMI_SYSPOWER_IN_PROGRESS) + scmi_request_forceful_transition(sc); + mutex_unlock(&sc->state_mtx); +} + +/** + * scmi_request_graceful_transition - Request graceful SystemPower transition + * @sc: A reference to the configuration data + * @timeout_ms: The desired timeout to wait for the shutdown to complete before + * system is forcibly shutdown. + * + * Initiates the required SystemPower transition, requesting userspace + * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event + * processing. + * + * It also takes care to register a reboot notifier and to schedule a delayed + * work in order to detect if userspace actions are taking too long and, in + * such a case, to trigger a forceful transition. + */ +static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc, + unsigned int timeout_ms) +{ + unsigned int adj_timeout_ms = 0; + + if (timeout_ms) { + int ret; + + sc->reboot_nb.notifier_call = &scmi_reboot_notifier; + ret = register_reboot_notifier(&sc->reboot_nb); + if (!ret) { + /* Wait only up to 75% of the advertised timeout */ + adj_timeout_ms = mult_frac(timeout_ms, 3, 4); + INIT_DELAYED_WORK(&sc->forceful_work, + scmi_forceful_work_func); + schedule_delayed_work(&sc->forceful_work, + msecs_to_jiffies(adj_timeout_ms)); + } else { + /* Carry on best effort even without a reboot notifier */ + dev_warn(sc->dev, + "Cannot register reboot notifier !\n"); + } + } + + dev_dbg(sc->dev, + "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n", + sc->required_transition, timeout_ms, adj_timeout_ms); + + switch (sc->required_transition) { + case SCMI_SYSTEM_SHUTDOWN: + /* + * When triggered early at boot-time the 'orderly' call will + * partially fail due to the lack of userspace itself, but + * the force=true argument will anyway start a successful + * forced shutdown. + */ + orderly_poweroff(true); + break; + case SCMI_SYSTEM_COLDRESET: + case SCMI_SYSTEM_WARMRESET: + orderly_reboot(); + break; + default: + break; + } +} + +/** + * scmi_userspace_notifier - Notifier callback to act on SystemPower + * Notifications + * @nb: Reference to the related notifier block + * @event: The SystemPower notification event id + * @data: The SystemPower event report + * + * This callback is in charge of decoding the received SystemPower report + * and acting accordingly, triggering a graceful or forceful system transition. + * + * Note that once a valid SCMI SystemPower event starts being served, any + * other following SystemPower notification received from the same SCMI + * instance (handle) will be ignored. + * + * Return: NOTIFY_OK once a valid SystemPower event has been successfully + * processed. + */ +static int scmi_userspace_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct scmi_system_power_state_notifier_report *er = data; + struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb); + + if (er->system_state >= SCMI_SYSTEM_POWERUP) { + dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n", + er->system_state); + return NOTIFY_DONE; + } + + if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) { + dev_err(sc->dev, "Ignoring forceful notification.\n"); + return NOTIFY_DONE; + } + + /* + * Bail out if system is already shutting down or an SCMI SystemPower + * request is already being served. + */ + if (system_state > SYSTEM_RUNNING) + return NOTIFY_DONE; + mutex_lock(&sc->state_mtx); + if (sc->state != SCMI_SYSPOWER_IDLE) { + dev_dbg(sc->dev, + "Transition already in progress...ignore.\n"); + mutex_unlock(&sc->state_mtx); + return NOTIFY_DONE; + } + sc->state = SCMI_SYSPOWER_IN_PROGRESS; + mutex_unlock(&sc->state_mtx); + + sc->required_transition = er->system_state; + + /* Leaving a trace in logs of who triggered the shutdown/reboot.
*/ + dev_info(sc->dev, "Serving shutdown/reboot request: %d\n", + sc->required_transition); + + scmi_request_graceful_transition(sc, er->timeout); + + return NOTIFY_OK; +} + +static int scmi_syspower_probe(struct scmi_device *sdev) +{ + int ret; + struct scmi_syspower_conf *sc; + struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM); + if (ret) + return ret; + + sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + sc->state = SCMI_SYSPOWER_IDLE; + mutex_init(&sc->state_mtx); + sc->required_transition = SCMI_SYSTEM_MAX; + sc->userspace_nb.notifier_call = &scmi_userspace_notifier; + sc->dev = &sdev->dev; + + return handle->notify_ops->devm_event_notifier_register(sdev, + SCMI_PROTOCOL_SYSTEM, + SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + NULL, &sc->userspace_nb); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SYSTEM, "syspower" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_system_power_driver = { + .name = "scmi-system-power", + .probe = scmi_syspower_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_system_power_driver); + +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index cdbb287bd8bc..0b5853fa9d87 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Sensor Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt @@ -11,7 +11,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" #define SCMI_MAX_NUM_SENSOR_AXIS 63 @@ -27,6 +27,8 @@ enum scmi_sensor_protocol_cmd { SENSOR_CONFIG_GET = 0x9, SENSOR_CONFIG_SET = 0xA, SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB, + SENSOR_NAME_GET = 0xC, + SENSOR_AXIS_NAME_GET = 0xD, }; struct scmi_msg_resp_sensor_attributes { @@ -63,6 +65,10 @@ struct scmi_msg_resp_attrs { __le32 max_range_high; }; +struct scmi_msg_sensor_description { + __le32 desc_index; +}; + struct scmi_msg_resp_sensor_description { __le16 num_returned; __le16 num_remaining; @@ -71,6 +77,7 @@ struct scmi_msg_resp_sensor_description { __le32 attributes_low; /* Common attributes_low macros */ #define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x)) +#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x)) #define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x)) __le32 attributes_high; /* Common attributes_high macros */ @@ -78,7 +85,7 @@ struct scmi_msg_resp_sensor_description { #define SENSOR_SCALE_SIGN BIT(4) #define SENSOR_SCALE_EXTEND GENMASK(31, 5) #define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x)) - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; /* only for version > 2.0 */ __le32 power; __le32 resolution; @@ -111,13 +118,22 @@ struct scmi_msg_resp_sensor_axis_description { struct scmi_axis_descriptor { __le32 id; __le32 attributes_low; +#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x)) __le32 attributes_high; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; __le32 resolution; struct scmi_msg_resp_attrs attrs; } desc[]; }; +struct scmi_msg_resp_sensor_axis_names_description { + 
__le32 num_axis_flags; + struct scmi_sensor_axis_name_descriptor { + __le32 axis_id; + u8 name[SCMI_MAX_STR_SIZE]; + } desc[]; +}; + /* Base scmi_axis_descriptor size excluding extended attrs after name */ #define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28 @@ -231,335 +247,450 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph, } static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out, - struct scmi_msg_resp_attrs *in) + const struct scmi_msg_resp_attrs *in) { out->min_range = get_unaligned_le64((void *)&in->min_range_low); out->max_range = get_unaligned_le64((void *)&in->max_range_low); } -static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, - struct scmi_sensor_info *s) -{ - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *ti; - struct scmi_msg_resp_sensor_list_update_intervals *buf; - struct scmi_msg_sensor_list_update_intervals *msg; - - ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS, - sizeof(*msg), 0, &ti); - if (ret) - return ret; - - buf = ti->rx.buf; - do { - u32 flags; - - msg = ti->tx.buf; - /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); - msg->index = cpu_to_le32(desc_index); +struct scmi_sens_ipriv { + void *priv; + struct device *dev; +}; - ret = ph->xops->do_xfer(ph, ti); - if (ret) - break; +static void iter_intervals_prepare_message(void *message, + unsigned int desc_index, + const void *p) +{ + struct scmi_msg_sensor_list_update_intervals *msg = message; + const struct scmi_sensor_info *s; - flags = le32_to_cpu(buf->num_intervals_flags); - num_returned = NUM_INTERVALS_RETURNED(flags); - num_remaining = NUM_INTERVALS_REMAINING(flags); + s = ((const struct scmi_sens_ipriv *)p)->priv; + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(s->id); + msg->index = cpu_to_le32(desc_index); +} - /* - * Max intervals is not declared previously anywhere so we - * assume it's returned+remaining. - */ - if (!s->intervals.count) { - s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags); - s->intervals.count = num_returned + num_remaining; - /* segmented intervals are reported in one triplet */ - if (s->intervals.segmented && - (num_remaining || num_returned != 3)) { - dev_err(ph->dev, - "Sensor ID:%d advertises an invalid segmented interval (%d)\n", - s->id, s->intervals.count); +static int iter_intervals_update_state(struct scmi_iterator_state *st, + const void *response, void *p) +{ + u32 flags; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; + struct device *dev = ((struct scmi_sens_ipriv *)p)->dev; + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + + flags = le32_to_cpu(r->num_intervals_flags); + st->num_returned = NUM_INTERVALS_RETURNED(flags); + st->num_remaining = NUM_INTERVALS_REMAINING(flags); + + /* + * Max intervals is not declared previously anywhere so we + * assume it's returned+remaining on first call. 
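The open-coded multi-part command loops in this file are being moved onto the common SCMI response iterator: .prepare_message() fills the request for the next chunk, .update_state() decodes how many items were returned and how many remain (and stashes a cursor in st->priv), and .process_response() runs once per returned item. A schematic user of that machinery, with hypothetical my_* names and signatures mirroring the callbacks used in this file:

static void my_prepare_message(void *message, unsigned int desc_index,
			       const void *priv);
static int my_update_state(struct scmi_iterator_state *st,
			   const void *response, void *priv);
static int my_process_response(const struct scmi_protocol_handle *ph,
			       const void *response,
			       struct scmi_iterator_state *st, void *priv);

static int my_enumerate(const struct scmi_protocol_handle *ph,
			unsigned int max_items, void *priv)
{
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = my_prepare_message,
		.update_state = my_update_state,
		.process_response = my_process_response,
	};

	iter = ph->hops->iter_response_init(ph, &ops, max_items,
					    MY_ENUMERATE_CMD_ID,
					    sizeof(struct my_enumerate_msg),
					    priv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	/* Transparently re-issues the command until nothing remains */
	return ph->hops->iter_response_run(iter);
}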
+ */ + if (!st->max_resources) { + s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags); + s->intervals.count = st->num_returned + st->num_remaining; + /* segmented intervals are reported in one triplet */ + if (s->intervals.segmented && + (st->num_remaining || st->num_returned != 3)) { + dev_err(dev, + "Sensor ID:%d advertises an invalid segmented interval (%d)\n", + s->id, s->intervals.count); + s->intervals.segmented = false; + s->intervals.count = 0; + return -EINVAL; + } + /* Direct allocation when exceeding pre-allocated */ + if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) { + s->intervals.desc = + devm_kcalloc(dev, + s->intervals.count, + sizeof(*s->intervals.desc), + GFP_KERNEL); + if (!s->intervals.desc) { s->intervals.segmented = false; s->intervals.count = 0; - ret = -EINVAL; - break; + return -ENOMEM; } - /* Direct allocation when exceeding pre-allocated */ - if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) { - s->intervals.desc = - devm_kcalloc(ph->dev, - s->intervals.count, - sizeof(*s->intervals.desc), - GFP_KERNEL); - if (!s->intervals.desc) { - s->intervals.segmented = false; - s->intervals.count = 0; - ret = -ENOMEM; - break; - } - } - } else if (desc_index + num_returned > s->intervals.count) { - dev_err(ph->dev, - "No. of update intervals can't exceed %d\n", - s->intervals.count); - ret = -EINVAL; - break; } - for (cnt = 0; cnt < num_returned; cnt++) - s->intervals.desc[desc_index + cnt] = - le32_to_cpu(buf->intervals[cnt]); + st->max_resources = s->intervals.count; + } - desc_index += num_returned; + return 0; +} - ph->xops->reset_rx_to_maxsz(ph, ti); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); +static int +iter_intervals_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *p) +{ + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; - ph->xops->xfer_put(ph, ti); - return ret; + s->intervals.desc[st->desc_index + st->loop_idx] = + le32_to_cpu(r->intervals[st->loop_idx]); + + return 0; } -static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, +static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *te; - struct scmi_msg_resp_sensor_axis_description *buf; - struct scmi_msg_sensor_axis_description_get *msg; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_intervals_prepare_message, + .update_state = iter_intervals_update_state, + .process_response = iter_intervals_process_response, + }; + struct scmi_sens_ipriv upriv = { + .priv = s, + .dev = ph->dev, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count, + SENSOR_LIST_UPDATE_INTERVALS, + sizeof(struct scmi_msg_sensor_list_update_intervals), + &upriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} - s->axis = devm_kcalloc(ph->dev, s->num_axis, - sizeof(*s->axis), GFP_KERNEL); - if (!s->axis) - return -ENOMEM; +struct scmi_apriv { + bool any_axes_support_extended_names; + struct scmi_sensor_info *s; +}; - ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET, - sizeof(*msg), 0, &te); - if (ret) - return ret; +static void iter_axes_desc_prepare_message(void *message, + const unsigned int desc_index, + 
const void *priv) +{ + struct scmi_msg_sensor_axis_description_get *msg = message; + const struct scmi_apriv *apriv = priv; - buf = te->rx.buf; - do { - u32 flags; - struct scmi_axis_descriptor *adesc; + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(apriv->s->id); + msg->axis_desc_index = cpu_to_le32(desc_index); +} - msg = te->tx.buf; - /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); - msg->axis_desc_index = cpu_to_le32(desc_index); +static int +iter_axes_desc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_description *r = response; - ret = ph->xops->do_xfer(ph, te); - if (ret) - break; + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; - flags = le32_to_cpu(buf->num_axis_flags); - num_returned = NUM_AXIS_RETURNED(flags); - num_remaining = NUM_AXIS_REMAINING(flags); + return 0; +} - if (desc_index + num_returned > s->num_axis) { - dev_err(ph->dev, "No. of axis can't exceed %d\n", - s->num_axis); - break; - } +static int +iter_axes_desc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + u32 attrh, attrl; + struct scmi_sensor_axis_info *a; + size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; + struct scmi_apriv *apriv = priv; + const struct scmi_axis_descriptor *adesc = st->priv; + + attrl = le32_to_cpu(adesc->attributes_low); + if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl)) + apriv->any_axes_support_extended_names = true; + + a = &apriv->s->axis[st->desc_index + st->loop_idx]; + a->id = le32_to_cpu(adesc->id); + a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + + attrh = le32_to_cpu(adesc->attributes_high); + a->scale = S32_EXT(SENSOR_SCALE(attrh)); + a->type = SENSOR_TYPE(attrh); + strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE); + + if (a->extended_attrs) { + unsigned int ares = le32_to_cpu(adesc->resolution); + + a->resolution = SENSOR_RES(ares); + a->exponent = S32_EXT(SENSOR_RES_EXP(ares)); + dsize += sizeof(adesc->resolution); + + scmi_parse_range_attrs(&a->attrs, &adesc->attrs); + dsize += sizeof(adesc->attrs); + } + st->priv = ((u8 *)adesc + dsize); - adesc = &buf->desc[0]; - for (cnt = 0; cnt < num_returned; cnt++) { - u32 attrh, attrl; - struct scmi_sensor_axis_info *a; - size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; + return 0; +} - attrl = le32_to_cpu(adesc->attributes_low); +static int +iter_axes_extended_name_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_names_description *r = response; - a = &s->axis[desc_index + cnt]; + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; - a->id = le32_to_cpu(adesc->id); - a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + return 0; +} - attrh = le32_to_cpu(adesc->attributes_high); - a->scale = S32_EXT(SENSOR_SCALE(attrh)); - a->type = SENSOR_TYPE(attrh); - strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); +static int +iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + struct scmi_sensor_axis_info *a; + const struct scmi_apriv *apriv = priv; + struct 
scmi_sensor_axis_name_descriptor *adesc = st->priv; + u32 axis_id = le32_to_cpu(adesc->axis_id); + + if (axis_id >= st->max_resources) + return -EPROTO; + + /* + * Pick the corresponding descriptor based on the axis_id embedded + * in the reply since the list of axes supporting extended names + * can be a subset of all the axes. + */ + a = &apriv->s->axis[axis_id]; + strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + st->priv = ++adesc; + + return 0; +} - if (a->extended_attrs) { - unsigned int ares = - le32_to_cpu(adesc->resolution); +static int +scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_extended_name_update_state, + .process_response = iter_axes_extended_name_process_response, + }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_NAME_GET, + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + /* + * Do not cause whole protocol initialization failure when failing to + * get extended names for axes. + */ + ret = ph->hops->iter_response_run(iter); + if (ret) + dev_warn(ph->dev, + "Failed to get axes extended names for %s (ret:%d).\n", + s->name, ret); - a->resolution = SENSOR_RES(ares); - a->exponent = - S32_EXT(SENSOR_RES_EXP(ares)); - dsize += sizeof(adesc->resolution); + return 0; +} - scmi_parse_range_attrs(&a->attrs, - &adesc->attrs); - dsize += sizeof(adesc->attrs); - } +static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s, + u32 version) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_desc_update_state, + .process_response = iter_axes_desc_process_response, + }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; - adesc = (typeof(adesc))((u8 *)adesc + dsize); - } + s->axis = devm_kcalloc(ph->dev, s->num_axis, + sizeof(*s->axis), GFP_KERNEL); + if (!s->axis) + return -ENOMEM; - desc_index += num_returned; + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_DESCRIPTION_GET, + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); - ph->xops->reset_rx_to_maxsz(ph, te); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; + + if (PROTOCOL_REV_MAJOR(version) >= 0x3 && + apriv.any_axes_support_extended_names) + ret = scmi_sensor_axis_extended_names_get(ph, s); - ph->xops->xfer_put(ph, te); return ret; } -static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph, - struct sensors_info *si) +static void iter_sens_descr_prepare_message(void *message, + unsigned int desc_index, + const void *priv) { - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_msg_resp_sensor_description *buf; - - ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET, - sizeof(__le32), 0, &t); - if (ret) - return ret; - - buf = t->rx.buf; + struct scmi_msg_sensor_description *msg = message; - do { - struct scmi_sensor_descriptor *sdesc; - - /* Set 
the number of sensors to be skipped/already read */ - put_unaligned_le32(desc_index, t->tx.buf); + msg->desc_index = cpu_to_le32(desc_index); +} - ret = ph->xops->do_xfer(ph, t); - if (ret) - break; +static int iter_sens_descr_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_sensor_description *r = response; - num_returned = le16_to_cpu(buf->num_returned); - num_remaining = le16_to_cpu(buf->num_remaining); + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); + st->priv = (void *)&r->desc[0]; - if (desc_index + num_returned > si->num_sensors) { - dev_err(ph->dev, "No. of sensors can't exceed %d", - si->num_sensors); - break; - } + return 0; +} - sdesc = &buf->desc[0]; - for (cnt = 0; cnt < num_returned; cnt++) { - u32 attrh, attrl; - struct scmi_sensor_info *s; - size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ; - - s = &si->sensors[desc_index + cnt]; - s->id = le32_to_cpu(sdesc->id); - - attrl = le32_to_cpu(sdesc->attributes_low); - /* common bitfields parsing */ - s->async = SUPPORTS_ASYNC_READ(attrl); - s->num_trip_points = NUM_TRIP_POINTS(attrl); - /** - * only SCMIv3.0 specific bitfield below. - * Such bitfields are assumed to be zeroed on non - * relevant fw versions...assuming fw not buggy ! - */ - s->update = SUPPORTS_UPDATE_NOTIFY(attrl); - s->timestamped = SUPPORTS_TIMESTAMP(attrl); - if (s->timestamped) - s->tstamp_scale = - S32_EXT(SENSOR_TSTAMP_EXP(attrl)); - s->extended_scalar_attrs = - SUPPORTS_EXTEND_ATTRS(attrl); - - attrh = le32_to_cpu(sdesc->attributes_high); - /* common bitfields parsing */ - s->scale = S32_EXT(SENSOR_SCALE(attrh)); - s->type = SENSOR_TYPE(attrh); - /* Use pre-allocated pool wherever possible */ - s->intervals.desc = s->intervals.prealloc_pool; - if (si->version == SCMIv2_SENSOR_PROTOCOL) { - s->intervals.segmented = false; - s->intervals.count = 1; - /* - * Convert SCMIv2.0 update interval format to - * SCMIv3.0 to be used as the common exposed - * descriptor, accessible via common macros. - */ - s->intervals.desc[0] = - (SENSOR_UPDATE_BASE(attrh) << 5) | - SENSOR_UPDATE_SCALE(attrh); - } else { - /* - * From SCMIv3.0 update intervals are retrieved - * via a dedicated (optional) command. - * Since the command is optional, on error carry - * on without any update interval. - */ - if (scmi_sensor_update_intervals(ph, s)) - dev_dbg(ph->dev, - "Update Intervals not available for sensor ID:%d\n", - s->id); - } - /** - * only > SCMIv2.0 specific bitfield below. - * Such bitfields are assumed to be zeroed on non - * relevant fw versions...assuming fw not buggy ! - */ - s->num_axis = min_t(unsigned int, - SUPPORTS_AXIS(attrh) ? 
- SENSOR_AXIS_NUMBER(attrh) : 0, - SCMI_MAX_NUM_SENSOR_AXIS); - strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE); - - if (s->extended_scalar_attrs) { - s->sensor_power = le32_to_cpu(sdesc->power); - dsize += sizeof(sdesc->power); - /* Only for sensors reporting scalar values */ - if (s->num_axis == 0) { - unsigned int sres = - le32_to_cpu(sdesc->resolution); - - s->resolution = SENSOR_RES(sres); - s->exponent = - S32_EXT(SENSOR_RES_EXP(sres)); - dsize += sizeof(sdesc->resolution); - - scmi_parse_range_attrs(&s->scalar_attrs, - &sdesc->scalar_attrs); - dsize += sizeof(sdesc->scalar_attrs); - } - } - if (s->num_axis > 0) { - ret = scmi_sensor_axis_description(ph, s); - if (ret) - goto out; - } +static int +iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) - sdesc = (typeof(sdesc))((u8 *)sdesc + dsize); +{ + int ret = 0; + u32 attrh, attrl; + size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ; + struct scmi_sensor_info *s; + struct sensors_info *si = priv; + const struct scmi_sensor_descriptor *sdesc = st->priv; + + s = &si->sensors[st->desc_index + st->loop_idx]; + s->id = le32_to_cpu(sdesc->id); + + attrl = le32_to_cpu(sdesc->attributes_low); + /* common bitfields parsing */ + s->async = SUPPORTS_ASYNC_READ(attrl); + s->num_trip_points = NUM_TRIP_POINTS(attrl); + /** + * only SCMIv3.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! + */ + s->update = SUPPORTS_UPDATE_NOTIFY(attrl); + s->timestamped = SUPPORTS_TIMESTAMP(attrl); + if (s->timestamped) + s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl)); + s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + + attrh = le32_to_cpu(sdesc->attributes_high); + /* common bitfields parsing */ + s->scale = S32_EXT(SENSOR_SCALE(attrh)); + s->type = SENSOR_TYPE(attrh); + /* Use pre-allocated pool wherever possible */ + s->intervals.desc = s->intervals.prealloc_pool; + if (si->version == SCMIv2_SENSOR_PROTOCOL) { + s->intervals.segmented = false; + s->intervals.count = 1; + /* + * Convert SCMIv2.0 update interval format to + * SCMIv3.0 to be used as the common exposed + * descriptor, accessible via common macros. + */ + s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) | + SENSOR_UPDATE_SCALE(attrh); + } else { + /* + * From SCMIv3.0 update intervals are retrieved + * via a dedicated (optional) command. + * Since the command is optional, on error carry + * on without any update interval. + */ + if (scmi_sensor_update_intervals(ph, s)) + dev_dbg(ph->dev, + "Update Intervals not available for sensor ID:%d\n", + s->id); + } + /** + * only > SCMIv2.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! + */ + s->num_axis = min_t(unsigned int, + SUPPORTS_AXIS(attrh) ? + SENSOR_AXIS_NUMBER(attrh) : 0, + SCMI_MAX_NUM_SENSOR_AXIS); + strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE); + + /* + * If supported overwrite short name with the extended + * one; on error just carry on and use already provided + * short name. 
+ */ + if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attrl)) + ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id, + s->name, SCMI_MAX_STR_SIZE); + + if (s->extended_scalar_attrs) { + s->sensor_power = le32_to_cpu(sdesc->power); + dsize += sizeof(sdesc->power); + + /* Only for sensors reporting scalar values */ + if (s->num_axis == 0) { + unsigned int sres = le32_to_cpu(sdesc->resolution); + + s->resolution = SENSOR_RES(sres); + s->exponent = S32_EXT(SENSOR_RES_EXP(sres)); + dsize += sizeof(sdesc->resolution); + + scmi_parse_range_attrs(&s->scalar_attrs, + &sdesc->scalar_attrs); + dsize += sizeof(sdesc->scalar_attrs); } + } - desc_index += num_returned; + if (s->num_axis > 0) + ret = scmi_sensor_axis_description(ph, s, si->version); - ph->xops->reset_rx_to_maxsz(ph, t); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + st->priv = ((u8 *)sdesc + dsize); -out: - ph->xops->xfer_put(ph, t); return ret; } +static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph, + struct sensors_info *si) +{ + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_sens_descr_prepare_message, + .update_state = iter_sens_descr_update_state, + .process_response = iter_sens_descr_process_response, + }; + + iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors, + SENSOR_DESCRIPTION_GET, + sizeof(__le32), si); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + static inline int scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id, u8 message_id, bool enable) @@ -631,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, { int ret; struct scmi_xfer *t; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET, sizeof(__le32), sizeof(__le32), &t); @@ -640,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, put_unaligned_le32(sensor_id, t->tx.buf); ret = ph->xops->do_xfer(ph, t); if (!ret) { - struct sensors_info *si = ph->get_priv(ph); struct scmi_sensor_info *s = si->sensors + sensor_id; *sensor_config = get_unaligned_le64(t->rx.buf); @@ -657,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph, int ret; struct scmi_xfer *t; struct scmi_msg_sensor_config_set *msg; + struct sensors_info *si = ph->get_priv(ph); + + if (sensor_id >= si->num_sensors) + return -EINVAL; ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET, sizeof(*msg), 0, &t); @@ -669,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - struct sensors_info *si = ph->get_priv(ph); struct scmi_sensor_info *s = si->sensors + sensor_id; s->sensor_config = sensor_config; @@ -700,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, int ret; struct scmi_xfer *t; struct scmi_msg_sensor_reading_get *sensor; + struct scmi_sensor_info *s; struct sensors_info *si = ph->get_priv(ph); - struct scmi_sensor_info *s = si->sensors + sensor_id; + + if (sensor_id >= si->num_sensors) + return -EINVAL; ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET, sizeof(*sensor), 0, &t); @@ -710,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, sensor = t->tx.buf; sensor->id = cpu_to_le32(sensor_id); + s = 
si->sensors + sensor_id; if (s->async) { sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC); ret = ph->xops->do_xfer_with_response(ph, t); @@ -764,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph, int ret; struct scmi_xfer *t; struct scmi_msg_sensor_reading_get *sensor; + struct scmi_sensor_info *s; struct sensors_info *si = ph->get_priv(ph); - struct scmi_sensor_info *s = si->sensors + sensor_id; + if (sensor_id >= si->num_sensors) + return -EINVAL; + + s = si->sensors + sensor_id; if (!count || !readings || (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis)) return -EINVAL; @@ -817,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id) { struct sensors_info *si = ph->get_priv(ph); + if (sensor_id >= si->num_sensors) + return NULL; + return si->sensors + sensor_id; } @@ -966,7 +1114,9 @@ static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph) int ret; struct sensors_info *sinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Sensor Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 0e3eaea5d852..1dfe534b8518 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -5,10 +5,13 @@ * Copyright (C) 2019 ARM Ltd. */ +#include <linux/ktime.h> #include <linux/io.h> #include <linux/processor.h> #include <linux/types.h> +#include <asm-generic/bug.h> + #include "common.h" /* @@ -30,16 +33,36 @@ struct scmi_shared_mem { }; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) { + ktime_t stop; + /* * Ideally channel must be free by now unless OS timeout last * request and platform continued to process the same, wait * until it releases the shared memory, otherwise we may endup - * overwriting its response with new message payload or vice-versa + * overwriting its response with new message payload or vice-versa. + * Giving up anyway after twice the expected channel timeout so as + * not to bail-out on intermittent issues where the platform is + * occasionally a bit slower to answer. + * + * Note that after a timeout is detected we bail-out and carry on but + * the transport functionality is probably permanently compromised: + * this is just to ease debugging and avoid complete hangs on boot + * due to a misbehaving SCMI firmware. */ - spin_until_cond(ioread32(&shmem->channel_status) & - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); + stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms); + spin_until_cond((ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) || + ktime_after(ktime_get(), stop)); + if (!(ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) { + WARN_ON_ONCE(1); + dev_err(cinfo->dev, + "Timeout waiting for a free TX channel !\n"); + return; + } + /* Mark channel busy + clear error */ iowrite32(0x0, &shmem->channel_status); iowrite32(xfer->hdr.poll_completion ? 
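The shmem change above replaces an unbounded spin with a deadline-bounded one. The same pattern in isolation (a sketch; MY_FREE_MASK stands in for the real channel-free status bit):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/processor.h>

#define MY_FREE_MASK	BIT(0)	/* placeholder for the channel-free flag */

static bool my_wait_channel_free(void __iomem *status, u32 timeout_ms)
{
	/* Allow up to twice the expected timeout before giving up */
	ktime_t stop = ktime_add_ms(ktime_get(), 2 * timeout_ms);

	spin_until_cond((ioread32(status) & MY_FREE_MASK) ||
			ktime_after(ktime_get(), stop));

	/* Re-check once: false here means timeout, not a free channel */
	return ioread32(status) & MY_FREE_MASK;
}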
0 : SCMI_SHMEM_FLAG_INTR_ENABLED, diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 4effecc3bb46..87a7b13cf868 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -7,6 +7,7 @@ */ #include <linux/arm-smccc.h> +#include <linux/atomic.h> #include <linux/device.h> #include <linux/err.h> #include <linux/interrupt.h> @@ -14,6 +15,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/processor.h> #include <linux/slab.h> #include "common.h" @@ -23,26 +25,29 @@ * * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area - * @shmem_lock: Lock to protect access to Tx/Rx shared memory area + * @shmem_lock: Lock to protect access to Tx/Rx shared memory area. + * Used when NOT operating in atomic mode. + * @inflight: Atomic flag to protect access to Tx/Rx shared memory area. + * Used when operating in atomic mode. * @func_id: smc/hvc call function id - * @irq: Optional; employed when platforms indicates msg completion by intr. - * @tx_complete: Optional, employed only when irq is valid. */ struct scmi_smc { struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + /* Protect access to shmem area */ struct mutex shmem_lock; +#define INFLIGHT_NONE MSG_TOKEN_MAX + atomic_t inflight; u32 func_id; - int irq; - struct completion tx_complete; }; static irqreturn_t smc_msg_done_isr(int irq, void *data) { struct scmi_smc *scmi_info = data; - complete(&scmi_info->tx_complete); + scmi_rx_callback(scmi_info->cinfo, + shmem_read_header(scmi_info->shmem), NULL); return IRQ_HANDLED; } @@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx) return true; } +static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_init(&scmi_info->shmem_lock); +} + +static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) +{ + int ret; + + ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); + + return ret == INFLIGHT_NONE; +} + +static inline void +smc_channel_lock_acquire(struct scmi_smc *scmi_info, + struct scmi_xfer *xfer __maybe_unused) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); + else + mutex_lock(&scmi_info->shmem_lock); +} + +static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) +{ + if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) + atomic_set(&scmi_info->inflight, INFLIGHT_NONE); + else + mutex_unlock(&scmi_info->shmem_lock); +} + static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, dev_err(dev, "failed to setup SCMI smc irq\n"); return ret; } - init_completion(&scmi_info->tx_complete); - scmi_info->irq = irq; + } else { + cinfo->no_completion_irq = true; } scmi_info->func_id = func_id; scmi_info->cinfo = cinfo; - mutex_init(&scmi_info->shmem_lock); + smc_channel_lock_init(scmi_info); cinfo->transport_info = scmi_info; return 0; @@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo, struct scmi_smc *scmi_info = cinfo->transport_info; struct arm_smccc_res res; - mutex_lock(&scmi_info->shmem_lock); - - shmem_tx_prepare(scmi_info->shmem, xfer); + /* + * Channel will be released only once response has been + * surely fully 
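In atomic mode the SMC transport replaces the mutex with a single cmpxchg-based inflight token: exactly one transfer can own the channel, and contending contexts busy-wait instead of sleeping. A sketch of that claim/release protocol, reusing INFLIGHT_NONE as defined above (my_* names are illustrative):

#include <linux/atomic.h>
#include <linux/processor.h>

static bool my_try_claim(atomic_t *inflight, int token)
{
	/* Succeeds only for the one context swapping NONE -> token */
	return atomic_cmpxchg(inflight, INFLIGHT_NONE, token) ==
	       INFLIGHT_NONE;
}

static void my_claim(atomic_t *inflight, int token)
{
	/* Busy-wait: usable from atomic context, unlike mutex_lock() */
	spin_until_cond(my_try_claim(inflight, token));
}

static void my_release(atomic_t *inflight)
{
	atomic_set(inflight, INFLIGHT_NONE);
}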
retrieved, so after .mark_txdone() + */ + smc_channel_lock_acquire(scmi_info, xfer); - if (scmi_info->irq) - reinit_completion(&scmi_info->tx_complete); + shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); - if (scmi_info->irq) - wait_for_completion(&scmi_info->tx_complete); - - scmi_rx_callback(scmi_info->cinfo, - shmem_read_header(scmi_info->shmem), NULL); - - mutex_unlock(&scmi_info->shmem_lock); - /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ - if (res.a0) + if (res.a0) { + smc_channel_lock_release(scmi_info); return -EOPNOTSUPP; + } + return 0; } @@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo, shmem_fetch_response(scmi_info->shmem, xfer); } -static bool -smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) +static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *__unused) { struct scmi_smc *scmi_info = cinfo->transport_info; - return shmem_poll_done(scmi_info->shmem, xfer); + smc_channel_lock_release(scmi_info); } static const struct scmi_transport_ops scmi_smc_ops = { @@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = { .chan_setup = smc_chan_setup, .chan_free = smc_chan_free, .send_message = smc_send_message, + .mark_txdone = smc_mark_txdone, .fetch_response = smc_fetch_response, - .poll_done = smc_poll_done, }; const struct scmi_desc scmi_smc_desc = { @@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = { .max_rx_timeout_ms = 30, .max_msg = 20, .max_msg_size = 128, + /* + * Setting .sync_cmds_completed_on_ret to true for SMC assumes that, + * once the SMC instruction has completed successfully, the issued + * SCMI command would have been already fully processed by the SCMI + * platform firmware and so any possible response value expected + * for the issued command will be immediately ready to be fetched + * from the shared memory area. + */ + .sync_cmds_completed_on_ret = true, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE), }; diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index e5175ef73b40..9383d7584539 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) System Power Protocol * - * Copyright (C) 2020-2021 ARM Ltd. + * Copyright (C) 2020-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt @@ -10,7 +10,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" #define SCMI_SYSTEM_NUM_SOURCES 1 @@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld { __le32 agent_id; __le32 flags; __le32 system_state; + __le32 timeout; }; struct scmi_system_info { u32 version; + bool graceful_timeout_supported; }; static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, @@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph, const void *payld, size_t payld_sz, void *report, u32 *src_id) { + size_t expected_sz; const struct scmi_system_power_state_notifier_payld *p = payld; struct scmi_system_power_state_notifier_report *r = report; + struct scmi_system_info *pinfo = ph->get_priv(ph); + expected_sz = pinfo->graceful_timeout_supported ? 
+ sizeof(*p) : sizeof(*p) - sizeof(__le32); if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER || - sizeof(*p) != payld_sz) + payld_sz != expected_sz) return NULL; r->timestamp = timestamp; r->agent_id = le32_to_cpu(p->agent_id); r->flags = le32_to_cpu(p->flags); r->system_state = le32_to_cpu(p->system_state); + if (pinfo->graceful_timeout_supported && + r->system_state == SCMI_SYSTEM_SHUTDOWN && + SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags)) + r->timeout = le32_to_cpu(p->timeout); + else + r->timeout = 0x00; *src_id = 0; return r; @@ -113,10 +125,13 @@ static const struct scmi_protocol_events system_protocol_events = { static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) { + int ret; u32 version; struct scmi_system_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "System Power Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -126,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) return -ENOMEM; pinfo->version = version; + if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) + pinfo->graceful_timeout_supported = true; + return ph->set_priv(ph, pinfo); } diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 87039c5c03fd..33c9b81a55cd 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -3,8 +3,8 @@ * Virtio Transport driver for Arm System Control and Management Interface * (SCMI). * - * Copyright (C) 2020-2021 OpenSynergy. - * Copyright (C) 2021 ARM Ltd. + * Copyright (C) 2020-2022 OpenSynergy. + * Copyright (C) 2021-2022 ARM Ltd. */ /** @@ -17,7 +17,9 @@ * virtqueue. Access to each virtqueue is protected by spinlocks. */ +#include <linux/completion.h> #include <linux/errno.h> +#include <linux/refcount.h> #include <linux/slab.h> #include <linux/virtio.h> #include <linux/virtio_config.h> @@ -27,6 +29,7 @@ #include "common.h" +#define VIRTIO_MAX_RX_TIMEOUT_MS 60000 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */ #define VIRTIO_SCMI_MAX_PDU_SIZE \ (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD) @@ -37,25 +40,46 @@ * * @vqueue: Associated virtqueue * @cinfo: SCMI Tx or Rx channel + * @free_lock: Protects access to the @free_list. * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only + * @deferred_tx_work: Worker for TX deferred replies processing + * @deferred_tx_wq: Workqueue for TX deferred replies + * @pending_lock: Protects access to the @pending_cmds_list. + * @pending_cmds_list: List of pre-fetched commands queued for later processing * @is_rx: Whether channel is an Rx channel - * @ready: Whether transport user is ready to hear about channel * @max_msg: Maximum number of pending messages for this channel. - * @lock: Protects access to all members except ready. - * @ready_lock: Protects access to ready. If required, it must be taken before - * lock. + * @lock: Protects access to all members except users, free_list and + * pending_cmds_list. + * @shutdown_done: A reference to a completion used when freeing this channel. + * @users: A reference count to currently active users of this channel. */ struct scmi_vio_channel { struct virtqueue *vqueue; struct scmi_chan_info *cinfo; + /* lock to protect access to the free list. */ + spinlock_t free_lock; struct list_head free_list; + /* lock to protect access to the pending list. 
*/ + spinlock_t pending_lock; + struct list_head pending_cmds_list; + struct work_struct deferred_tx_work; + struct workqueue_struct *deferred_tx_wq; bool is_rx; - bool ready; unsigned int max_msg; - /* lock to protect access to all members except ready. */ + /* + * Lock to protect access to all members except users, free_list and + * pending_cmds_list + */ spinlock_t lock; - /* lock to rotects access to ready flag. */ - spinlock_t ready_lock; + struct completion *shutdown_done; + refcount_t users; +}; + +enum poll_states { + VIO_MSG_NOT_POLLED, + VIO_MSG_POLL_TIMEOUT, + VIO_MSG_POLLING, + VIO_MSG_POLL_DONE, }; /** @@ -65,29 +89,148 @@ struct scmi_vio_channel { * @input: SDU used for (delayed) responses and notifications * @list: List which scmi_vio_msg may be part of * @rx_len: Input SDU size in bytes, once input has been received + * @poll_idx: Last used index registered for polling purposes if this message + * transaction reply was configured for polling. + * @poll_status: Polling state for this message. + * @poll_lock: A lock to protect @poll_status + * @users: A reference count to track this message users and avoid premature + * freeing (and reuse) when polling and IRQ execution paths interleave. */ struct scmi_vio_msg { struct scmi_msg_payld *request; struct scmi_msg_payld *input; struct list_head list; unsigned int rx_len; + unsigned int poll_idx; + enum poll_states poll_status; + /* Lock to protect access to poll_status */ + spinlock_t poll_lock; + refcount_t users; }; /* Only one SCMI VirtIO device can possibly exist */ static struct virtio_device *scmi_vdev; +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch, + struct scmi_chan_info *cinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + refcount_set(&vioch->users, 1); +} + +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch) +{ + return refcount_inc_not_zero(&vioch->users); +} + +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch) +{ + if (refcount_dec_and_test(&vioch->users)) { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + if (vioch->shutdown_done) { + vioch->cinfo = NULL; + complete(vioch->shutdown_done); + } + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); + + /* + * Prepare to wait for the last release if not already released + * or in progress. 
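The old ready/ready_lock pair becomes a users refcount: each RX/TX path holds a reference across its access and the last dropper completes the shutdown. The resulting guard idiom, reusing the helpers above (my_use_channel() is illustrative):

static int my_use_channel(struct scmi_vio_channel *vioch)
{
	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;	/* teardown in progress, refcount hit zero */

	/* ... vioch->vqueue and vioch->cinfo can be used safely here ... */

	scmi_vio_channel_release(vioch);
	return 0;
}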
+ */ + spin_lock_irqsave(&vioch->lock, flags); + if (!vioch->cinfo || vioch->shutdown_done) { + spin_unlock_irqrestore(&vioch->lock, flags); + return; + } + + vioch->shutdown_done = &vioch_shutdown_done; + virtio_break_device(vioch->vqueue->vdev); + if (!vioch->is_rx && vioch->deferred_tx_wq) + /* Cannot be kicked anymore after this...*/ + vioch->deferred_tx_wq = NULL; + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + /* Let any possibly concurrent RX path release the channel */ + wait_for_completion(vioch->shutdown_done); +} + +/* Assumes to be called with vio channel acquired already */ +static struct scmi_vio_msg * +scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch) +{ + unsigned long flags; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->free_lock, flags); + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->free_lock, flags); + return NULL; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del_init(&msg->list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_NOT_POLLED; + refcount_set(&msg->users, 1); + + return msg; +} + +static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg) +{ + return refcount_inc_not_zero(&msg->users); +} + +/* Assumes to be called with vio channel acquired already */ +static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + bool ret; + + ret = refcount_dec_and_test(&msg->users); + if (ret) { + unsigned long flags; + + spin_lock_irqsave(&vioch->free_lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->free_lock, flags); + } + + return ret; +} + static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) { return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); } static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg, - struct device *dev) + struct scmi_vio_msg *msg) { struct scatterlist sg_in; int rc; unsigned long flags; + struct device *dev = &vioch->vqueue->vdev->dev; sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); @@ -95,7 +238,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); if (rc) - dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); + dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc); else virtqueue_kick(vioch->vqueue); @@ -104,22 +247,22 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, return rc; } +/* + * Assume to be called with channel already acquired or not ready at all; + * vioch->lock MUST NOT have been already acquired. 
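Note that scmi_vio_msg_release() doubles as a 'was I the last user?' test: it returns true only for the caller that actually put the buffer back on the free list, which virtio_mark_txdone() later exploits to detect that the IRQ path already consumed a polled message. The underlying refcount idiom, reduced to a sketch with illustrative my_* types:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct my_msg {
	struct list_head list;
	refcount_t users;
};

static bool my_msg_release(struct my_msg *m, spinlock_t *free_lock,
			   struct list_head *free_list)
{
	bool last = refcount_dec_and_test(&m->users);

	if (last) {
		unsigned long flags;

		spin_lock_irqsave(free_lock, flags);
		list_add_tail(&m->list, free_list);
		spin_unlock_irqrestore(free_lock, flags);
	}

	/* True only for the caller that actually freed the message */
	return last;
}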
+ */ static void scmi_finalize_message(struct scmi_vio_channel *vioch, struct scmi_vio_msg *msg) { - if (vioch->is_rx) { - scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); - } else { - /* Here IRQs are assumed to be already disabled by the caller */ - spin_lock(&vioch->lock); - list_add(&msg->list, &vioch->free_list); - spin_unlock(&vioch->lock); - } + if (vioch->is_rx) + scmi_vio_feed_vq_rx(vioch, msg); + else + scmi_vio_msg_release(vioch, msg); } static void scmi_vio_complete_cb(struct virtqueue *vqueue) { - unsigned long ready_flags; + unsigned long flags; unsigned int length; struct scmi_vio_channel *vioch; struct scmi_vio_msg *msg; @@ -130,27 +273,25 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; for (;;) { - spin_lock_irqsave(&vioch->ready_lock, ready_flags); + if (!scmi_vio_channel_acquire(vioch)) + return; - if (!vioch->ready) { - if (!cb_enabled) - (void)virtqueue_enable_cb(vqueue); - goto unlock_ready_out; - } - - /* IRQs already disabled here no need to irqsave */ - spin_lock(&vioch->lock); + spin_lock_irqsave(&vioch->lock, flags); if (cb_enabled) { virtqueue_disable_cb(vqueue); cb_enabled = false; } + msg = virtqueue_get_buf(vqueue, &length); if (!msg) { - if (virtqueue_enable_cb(vqueue)) - goto unlock_out; + if (virtqueue_enable_cb(vqueue)) { + spin_unlock_irqrestore(&vioch->lock, flags); + scmi_vio_channel_release(vioch); + return; + } cb_enabled = true; } - spin_unlock(&vioch->lock); + spin_unlock_irqrestore(&vioch->lock, flags); if (msg) { msg->rx_len = length; @@ -161,19 +302,57 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue) } /* - * Release ready_lock and re-enable IRQs between loop iterations - * to allow virtio_chan_free() to possibly kick in and set the - * flag vioch->ready to false even in between processing of - * messages, so as to force outstanding messages to be ignored - * when system is shutting down. + * Release vio channel between loop iterations to allow + * virtio_chan_free() to eventually fully release it when + * shutting down; in such a case, any outstanding message will + * be ignored since this loop will bail out at the next + * iteration. + */ + scmi_vio_channel_release(vioch); + } +} + +static void scmi_vio_deferred_tx_worker(struct work_struct *work) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg, *tmp; + + vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work); + + if (!scmi_vio_channel_acquire(vioch)) + return; + + /* + * Process pre-fetched messages: these could be non-polled messages or + * late timed-out replies to polled messages dequeued by chance while + * polling for some other messages: this worker is in charge to process + * the valid non-expired messages and anyway finally free all of them. + */ + spin_lock_irqsave(&vioch->pending_lock, flags); + + /* Scan the list of possibly pre-fetched messages during polling. */ + list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) { + list_del(&msg->list); + + /* + * Channel is acquired here (cannot vanish) and this message + * is no more processed elsewhere so no poll_lock needed. 
*/ - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + if (msg->poll_status == VIO_MSG_NOT_POLLED) + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + /* Free the processed message once done */ + scmi_vio_msg_release(vioch, msg); } -unlock_out: - spin_unlock(&vioch->lock); -unlock_ready_out: - spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + spin_unlock_irqrestore(&vioch->pending_lock, flags); + + /* Process possibly still pending messages */ + scmi_vio_complete_cb(vioch->vqueue); + + scmi_vio_channel_release(vioch); } static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; @@ -193,8 +372,8 @@ static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) static int virtio_link_supplier(struct device *dev) { if (!scmi_vdev) { - dev_notice_once(dev, - "Deferring probe after not finding a bound scmi-virtio device\n"); + dev_notice(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); return -EPROBE_DEFER; } @@ -231,10 +410,14 @@ static bool virtio_chan_available(struct device *dev, int idx) return vioch && !vioch->cinfo; } +static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) +{ + destroy_workqueue(deferred_tx_wq); +} + static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { - unsigned long flags; struct scmi_vio_channel *vioch; int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; int i; @@ -244,64 +427,65 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + /* Setup a deferred worker for polling. */ + if (tx && !vioch->deferred_tx_wq) { + int ret; + + vioch->deferred_tx_wq = + alloc_workqueue(dev_name(&scmi_vdev->dev), + WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, + 0); + if (!vioch->deferred_tx_wq) + return -ENOMEM; + + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, + vioch->deferred_tx_wq); + if (ret) + return ret; + + INIT_WORK(&vioch->deferred_tx_work, + scmi_vio_deferred_tx_worker); + } + for (i = 0; i < vioch->max_msg; i++) { struct scmi_vio_msg *msg; - msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; if (tx) { - msg->request = devm_kzalloc(cinfo->dev, + msg->request = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->request) return -ENOMEM; + spin_lock_init(&msg->poll_lock); + refcount_set(&msg->users, 1); } - msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->input) return -ENOMEM; - if (tx) { - spin_lock_irqsave(&vioch->lock, flags); - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->lock, flags); - } else { - scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); - } + scmi_finalize_message(vioch, msg); } - spin_lock_irqsave(&vioch->lock, flags); - cinfo->transport_info = vioch; - /* Indirectly setting channel not available any more */ - vioch->cinfo = cinfo; - spin_unlock_irqrestore(&vioch->lock, flags); - - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = true; - spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_ready(vioch, cinfo); return 0; } static int virtio_chan_free(int id, void *p, void *data) { - unsigned long flags; struct scmi_chan_info *cinfo = p; struct scmi_vio_channel *vioch = cinfo->transport_info; - spin_lock_irqsave(&vioch->ready_lock, flags); - vioch->ready = false; - 
spin_unlock_irqrestore(&vioch->ready_lock, flags); + scmi_vio_channel_cleanup_sync(vioch); scmi_free_channel(cinfo, data, id); - spin_lock_irqsave(&vioch->lock, flags); - vioch->cinfo = NULL; - spin_unlock_irqrestore(&vioch->lock, flags); - return 0; } @@ -316,33 +500,56 @@ static int virtio_send_message(struct scmi_chan_info *cinfo, int rc; struct scmi_vio_msg *msg; - spin_lock_irqsave(&vioch->lock, flags); + if (!scmi_vio_channel_acquire(vioch)) + return -EINVAL; - if (list_empty(&vioch->free_list)) { - spin_unlock_irqrestore(&vioch->lock, flags); + msg = scmi_virtio_get_free_msg(vioch); + if (!msg) { + scmi_vio_channel_release(vioch); return -EBUSY; } - msg = list_first_entry(&vioch->free_list, typeof(*msg), list); - list_del(&msg->list); - msg_tx_prepare(msg->request, xfer); sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + spin_lock_irqsave(&vioch->lock, flags); + + /* + * If polling was requested for this transaction: + * - retrieve last used index (will be used as polling reference) + * - bind the polled message to the xfer via .priv + * - grab an additional msg refcount for the poll-path + */ + if (xfer->hdr.poll_completion) { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + /* Still no users, no need to acquire poll_lock */ + msg->poll_status = VIO_MSG_POLLING; + scmi_vio_msg_acquire(msg); + /* Ensure initialized msg is visibly bound to xfer */ + smp_store_mb(xfer->priv, msg); + } + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); - if (rc) { - list_add(&msg->list, &vioch->free_list); - dev_err_once(vioch->cinfo->dev, - "%s() failed to add to virtqueue (%d)\n", __func__, - rc); - } else { + if (rc) + dev_err(vioch->cinfo->dev, + "failed to add to TX virtqueue (%d)\n", rc); + else virtqueue_kick(vioch->vqueue); - } spin_unlock_irqrestore(&vioch->lock, flags); + if (rc) { + /* Ensure order between xfer->priv clear and vq feeding */ + smp_store_mb(xfer->priv, NULL); + if (xfer->hdr.poll_completion) + scmi_vio_msg_release(vioch, msg); + scmi_vio_msg_release(vioch, msg); + } + + scmi_vio_channel_release(vioch); + return rc; } @@ -351,10 +558,8 @@ static void virtio_fetch_response(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_response(msg->input, msg->rx_len, xfer); - xfer->priv = NULL; - } } static void virtio_fetch_notification(struct scmi_chan_info *cinfo, @@ -362,10 +567,225 @@ static void virtio_fetch_notification(struct scmi_chan_info *cinfo, { struct scmi_vio_msg *msg = xfer->priv; - if (msg) { + if (msg) msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); - xfer->priv = NULL; +} + +/** + * virtio_mark_txdone - Mark transmission done + * + * Free only completed polling transfer messages. + * + * Note that in the SCMI VirtIO transport we never explicitly release still + * outstanding but timed-out messages by forcibly re-adding them to the + * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the + * TX deferred worker, eventually clean up such messages once, finally, a late + * reply is received and discarded (if ever). + * + * This approach was deemed preferable since those pending timed-out buffers are + * still effectively owned by the SCMI platform VirtIO device even after timeout + * expiration: forcibly freeing and reusing them before they had been returned + * explicitly by the SCMI platform could lead to subtle bugs due to message + * corruption. 
+ * An SCMI platform VirtIO device which never returns message buffers is + * anyway broken and it will quickly lead to exhaustion of available messages. + * + * For this same reason, here, we take care to free only the polled messages + * that had been somehow replied (only if not by chance already processed on the + * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also + * any timed-out polled message if that indeed appears to have been at least + * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such + * messages won't be freed elsewhere. Any other polled message is marked as + * VIO_MSG_POLL_TIMEOUT. + * + * Possible late replies to timed-out polled messages will be eventually freed + * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if + * dequeued on some other polling path. + * + * @cinfo: SCMI channel info + * @ret: Transmission return code + * @xfer: Transfer descriptor + */ +static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret, + struct scmi_xfer *xfer) +{ + unsigned long flags; + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scmi_vio_msg *msg = xfer->priv; + + if (!msg || !scmi_vio_channel_acquire(vioch)) + return; + + /* Ensure msg is unbound from xfer anyway at this point */ + smp_store_mb(xfer->priv, NULL); + + /* Must be a polled xfer and not already freed on the IRQ path */ + if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) { + scmi_vio_channel_release(vioch); + return; + } + + spin_lock_irqsave(&msg->poll_lock, flags); + /* Do not free timed-out polled messages if still inflight */ + if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE) + scmi_vio_msg_release(vioch, msg); + else if (msg->poll_status == VIO_MSG_POLLING) + msg->poll_status = VIO_MSG_POLL_TIMEOUT; + spin_unlock_irqrestore(&msg->poll_lock, flags); + + scmi_vio_channel_release(vioch); +} + +/** + * virtio_poll_done - Provide polling support for VirtIO transport + * + * @cinfo: SCMI channel info + * @xfer: Reference to the transfer being polled for. + * + * VirtIO core provides a polling mechanism based only on last used indexes: + * this means that it is possible to poll the virtqueues waiting for something + * new to arrive from the host side, but the only way to check if the freshly + * arrived buffer was indeed what we were waiting for is to compare the newly + * arrived message descriptor with the one we are polling on. + * + * As a consequence we can end up dequeuing something different from the buffer + * we were poll-waiting for: if that is the case, such early fetched buffers are + * then added to the @pending_cmds_list list for later processing by a + * dedicated deferred worker. + * + * So, basically, once something new is spotted we proceed to de-queue all the + * freshly received used buffers until we find the one we were polling on, or + * we have 'seemingly' emptied the virtqueue; if some buffers are still pending + * in the vqueue at the end of the polling loop (possible due to inherent races + * in virtqueues handling mechanisms), we similarly kick the deferred worker + * and let it process those, to avoid indefinitely looping in the .poll_done + * busy-waiting helper. + * + * Finally, we also delegate to the deferred worker the final freeing of any + * timed-out reply to a polled message that we should dequeue. 
+ *
+ * Note that, since we do NOT have a per-message suppress-notification
+ * mechanism, the message we are polling for could alternatively be delivered
+ * via the usual IRQ callbacks on another core which happened to have IRQs
+ * enabled while we are actively polling for it here: in such a case it will
+ * be handled as such by scmi_rx_callback() and the polling loop in the SCMI
+ * Core TX path will be transparently terminated anyway.
+ *
+ * Return: True once polling has successfully completed.
+ */
+static bool virtio_poll_done(struct scmi_chan_info *cinfo,
+			     struct scmi_xfer *xfer)
+{
+	bool pending, found = false;
+	unsigned int length, any_prefetched = 0;
+	unsigned long flags;
+	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
+	struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+	if (!msg)
+		return true;
+
+	/*
+	 * Already processed by another polling loop on another CPU ?
+	 *
+	 * Note that this message is acquired on the poll path so it cannot
+	 * vanish while inside this loop iteration even if concurrently
+	 * processed on the IRQ path.
+	 *
+	 * Avoid acquiring poll_lock since poll_status can change in a
+	 * relevant manner only later in this same thread of execution: any
+	 * other possible changes made concurrently by other polling loops or
+	 * by a reply delivered on the IRQ path have no meaningful impact on
+	 * this loop iteration: in other words it is harmless to allow this
+	 * possible race, and it lets us avoid spinlocking with IRQs off in
+	 * this initial part of the polling loop.
+	 */
+	if (msg->poll_status == VIO_MSG_POLL_DONE)
+		return true;
+
+	if (!scmi_vio_channel_acquire(vioch))
+		return true;
+
+	/* Has the cmdq index moved at all ? */
+	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+	if (!pending) {
+		scmi_vio_channel_release(vioch);
+		return false;
+	}
+
+	spin_lock_irqsave(&vioch->lock, flags);
+	virtqueue_disable_cb(vioch->vqueue);
+
+	/*
+	 * Process all new messages till the polled-for message is found OR
+	 * the vqueue is empty.
+	 */
+	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
+		bool next_msg_done = false;
+
+		/*
+		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
+		 * that it can be properly freed even on timeout in
+		 * mark_txdone.
+		 */
+		spin_lock(&next_msg->poll_lock);
+		if (next_msg->poll_status == VIO_MSG_POLLING) {
+			next_msg->poll_status = VIO_MSG_POLL_DONE;
+			next_msg_done = true;
+		}
+		spin_unlock(&next_msg->poll_lock);
+
+		next_msg->rx_len = length;
+		/* Is this the message we were polling for ? */
+		if (next_msg == msg) {
+			found = true;
+			break;
+		} else if (next_msg_done) {
+			/* Skip the rest if this was another polled msg */
+			continue;
+		}
+
+		/*
+		 * Enqueue for later processing any non-polled message and any
+		 * timed-out polled one that we happen to have dequeued.
+		 */
+		spin_lock(&next_msg->poll_lock);
+		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
+		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
+			spin_unlock(&next_msg->poll_lock);
+
+			any_prefetched++;
+			spin_lock(&vioch->pending_lock);
+			list_add_tail(&next_msg->list,
+				      &vioch->pending_cmds_list);
+			spin_unlock(&vioch->pending_lock);
+		} else {
+			spin_unlock(&next_msg->poll_lock);
+		}
+	}
+
+	/*
+	 * Once the polling loop has successfully terminated, anything else
+	 * queued in the meantime will be served by the deferred worker, by
+	 * the normal IRQ callback, or by other polling loops.
+	 *
+	 * If we are still looking for the polled reply, the polling index has
+	 * to be updated to the current vqueue last used index.
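+	 *
+	 * (The re-arm below follows the standard virtio idiom: snapshot the
+	 * last used index with virtqueue_enable_cb_prepare() and immediately
+	 * re-check it with virtqueue_poll(), so that buffers which arrived
+	 * while callbacks were disabled are not missed.)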
+ */ + if (found) { + pending = !virtqueue_enable_cb(vioch->vqueue); + } else { + msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue); + pending = virtqueue_poll(vioch->vqueue, msg->poll_idx); } + + if (vioch->deferred_tx_wq && (any_prefetched || pending)) + queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work); + + spin_unlock_irqrestore(&vioch->lock, flags); + + scmi_vio_channel_release(vioch); + + return found; } static const struct scmi_transport_ops scmi_virtio_ops = { @@ -377,6 +797,8 @@ static const struct scmi_transport_ops scmi_virtio_ops = { .send_message = virtio_send_message, .fetch_response = virtio_fetch_response, .fetch_notification = virtio_fetch_notification, + .mark_txdone = virtio_mark_txdone, + .poll_done = virtio_poll_done, }; static int scmi_vio_probe(struct virtio_device *vdev) @@ -417,8 +839,10 @@ static int scmi_vio_probe(struct virtio_device *vdev) unsigned int sz; spin_lock_init(&channels[i].lock); - spin_lock_init(&channels[i].ready_lock); + spin_lock_init(&channels[i].free_lock); INIT_LIST_HEAD(&channels[i].free_list); + spin_lock_init(&channels[i].pending_lock); + INIT_LIST_HEAD(&channels[i].pending_cmds_list); channels[i].vqueue = vqs[i]; sz = virtqueue_get_vring_size(channels[i].vqueue); @@ -427,10 +851,10 @@ static int scmi_vio_probe(struct virtio_device *vdev) sz /= DESCRIPTORS_PER_TX_MSG; if (sz > MSG_TOKEN_MAX) { - dev_info_once(dev, - "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", - channels[i].is_rx ? "rx" : "tx", - sz, MSG_TOKEN_MAX); + dev_info(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? "rx" : "tx", + sz, MSG_TOKEN_MAX); sz = MSG_TOKEN_MAX; } channels[i].max_msg = sz; @@ -452,7 +876,7 @@ static void scmi_vio_remove(struct virtio_device *vdev) * outstanding message on any vqueue to be ignored by complete_cb: now * we can just stop processing buffers and destroy the vqueues. */ - vdev->config->reset(vdev); + virtio_reset_device(vdev); vdev->config->del_vqs(vdev); /* Ensure scmi_vdev is visible as NULL */ smp_store_mb(scmi_vdev, NULL); @@ -460,12 +884,13 @@ static void scmi_vio_remove(struct virtio_device *vdev) static int scmi_vio_validate(struct virtio_device *vdev) { +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { dev_err(&vdev->dev, "device does not comply with spec version 1.x\n"); return -EINVAL; } - +#endif return 0; } @@ -503,7 +928,9 @@ const struct scmi_desc scmi_virtio_desc = { .transport_init = virtio_scmi_init, .transport_exit = virtio_scmi_exit, .ops = &scmi_virtio_ops, - .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ + /* for non-realtime virtio devices */ + .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS, .max_msg = 0, /* overridden by virtio_get_max_msg() */ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, + .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE), }; diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index ac08e819088b..eaa8d944926a 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -2,13 +2,13 @@ /* * System Control and Management Interface (SCMI) Voltage Protocol * - * Copyright (C) 2020-2021 ARM Ltd. + * Copyright (C) 2020-2022 ARM Ltd. 
*/ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0) #define REMAINING_LEVELS_MASK GENMASK(31, 16) @@ -21,13 +21,16 @@ enum scmi_voltage_protocol_cmd { VOLTAGE_CONFIG_GET = 0x6, VOLTAGE_LEVEL_SET = 0x7, VOLTAGE_LEVEL_GET = 0x8, + VOLTAGE_DOMAIN_NAME_GET = 0x09, }; #define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x)))) struct scmi_msg_resp_domain_attributes { __le32 attr; - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_cmd_describe_levels { @@ -54,6 +57,11 @@ struct scmi_msg_cmd_level_set { __le32 voltage_level; }; +struct scmi_resp_voltage_level_set_complete { + __le32 domain_id; + __le32 voltage_level; +}; + struct voltage_info { unsigned int version; unsigned int num_domains; @@ -110,14 +118,100 @@ static int scmi_init_voltage_levels(struct device *dev, return 0; } +struct scmi_volt_ipriv { + struct device *dev; + struct scmi_voltage_info *v; +}; + +static void iter_volt_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_cmd_describe_levels *msg = message; + const struct scmi_volt_ipriv *p = priv; + + msg->domain_id = cpu_to_le32(p->v->id); + msg->level_index = cpu_to_le32(desc_index); +} + +static int iter_volt_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + int ret = 0; + u32 flags; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + flags = le32_to_cpu(r->flags); + st->num_returned = NUM_RETURNED_LEVELS(flags); + st->num_remaining = NUM_REMAINING_LEVELS(flags); + + /* Allocate space for num_levels if not already done */ + if (!p->v->num_levels) { + ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned, + st->num_remaining, + SUPPORTS_SEGMENTED_LEVELS(flags)); + if (!ret) + st->max_resources = p->v->num_levels; + } + + return ret; +} + +static int +iter_volt_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + s32 val; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + val = (s32)le32_to_cpu(r->voltage[st->loop_idx]); + p->v->levels_uv[st->desc_index + st->loop_idx] = val; + if (val < 0) + p->v->negative_volts_allowed = true; + + return 0; +} + +static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, + struct scmi_voltage_info *v) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_volt_levels_prepare_message, + .update_state = iter_volt_levels_update_state, + .process_response = iter_volt_levels_process_response, + }; + struct scmi_volt_ipriv vpriv = { + .dev = ph->dev, + .v = v, + }; + + iter = ph->hops->iter_response_init(ph, &ops, v->num_levels, + VOLTAGE_DESCRIBE_LEVELS, + sizeof(struct scmi_msg_cmd_describe_levels), + &vpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) { + v->num_levels = 0; + devm_kfree(ph->dev, v->levels_uv); + } + + return ret; +} + static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, struct voltage_info *vinfo) { int ret, dom; - struct scmi_xfer *td, *tl; - struct device *dev = ph->dev; + struct scmi_xfer *td; struct scmi_msg_resp_domain_attributes *resp_dom; - struct 
scmi_msg_resp_describe_levels *resp_levels; ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES, sizeof(__le32), sizeof(*resp_dom), &td); @@ -125,90 +219,39 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, return ret; resp_dom = td->rx.buf; - ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS, - sizeof(__le64), 0, &tl); - if (ret) - goto outd; - resp_levels = tl->rx.buf; - for (dom = 0; dom < vinfo->num_domains; dom++) { - u32 desc_index = 0; - u16 num_returned = 0, num_remaining = 0; - struct scmi_msg_cmd_describe_levels *cmd; + u32 attributes; struct scmi_voltage_info *v; /* Retrieve domain attributes at first ... */ put_unaligned_le32(dom, td->tx.buf); - ret = ph->xops->do_xfer(ph, td); /* Skip domain on comms error */ - if (ret) + if (ph->xops->do_xfer(ph, td)) continue; v = vinfo->domains + dom; v->id = dom; - v->attributes = le32_to_cpu(resp_dom->attr); - strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE); - - cmd = tl->tx.buf; - /* ...then retrieve domain levels descriptions */ - do { - u32 flags; - int cnt; - - cmd->domain_id = cpu_to_le32(v->id); - cmd->level_index = cpu_to_le32(desc_index); - ret = ph->xops->do_xfer(ph, tl); - if (ret) - break; - - flags = le32_to_cpu(resp_levels->flags); - num_returned = NUM_RETURNED_LEVELS(flags); - num_remaining = NUM_REMAINING_LEVELS(flags); - - /* Allocate space for num_levels if not already done */ - if (!v->num_levels) { - ret = scmi_init_voltage_levels(dev, v, - num_returned, - num_remaining, - SUPPORTS_SEGMENTED_LEVELS(flags)); - if (ret) - break; - } - - if (desc_index + num_returned > v->num_levels) { - dev_err(ph->dev, - "No. of voltage levels can't exceed %d\n", - v->num_levels); - ret = -EINVAL; - break; - } - - for (cnt = 0; cnt < num_returned; cnt++) { - s32 val; - - val = - (s32)le32_to_cpu(resp_levels->voltage[cnt]); - v->levels_uv[desc_index + cnt] = val; - if (val < 0) - v->negative_volts_allowed = true; - } - - desc_index += num_returned; - - ph->xops->reset_rx_to_maxsz(ph, tl); - /* check both to avoid infinite loop due to buggy fw */ - } while (num_returned && num_remaining); - - if (ret) { - v->num_levels = 0; - devm_kfree(dev, v->levels_uv); + attributes = le32_to_cpu(resp_dom->attr); + strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, + VOLTAGE_DOMAIN_NAME_GET, + v->id, v->name, + SCMI_MAX_STR_SIZE); + if (SUPPORTS_ASYNC_LEVEL_SET(attributes)) + v->async_level_set = true; } - ph->xops->reset_rx_to_maxsz(ph, td); + /* Skip invalid voltage descriptors */ + scmi_voltage_levels_get(ph, v); } - ph->xops->xfer_put(ph, tl); -outd: ph->xops->xfer_put(ph, td); return ret; @@ -271,12 +314,15 @@ static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph, } static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph, - u32 domain_id, u32 flags, s32 volt_uV) + u32 domain_id, + enum scmi_voltage_level_mode mode, + s32 volt_uV) { int ret; struct scmi_xfer *t; struct voltage_info *vinfo = ph->get_priv(ph); struct scmi_msg_cmd_level_set *cmd; + struct scmi_voltage_info *v; if (domain_id >= vinfo->num_domains) return -EINVAL; @@ -286,12 +332,31 @@ static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph, if (ret) return ret; + v = vinfo->domains + domain_id; + cmd = t->tx.buf; cmd->domain_id = cpu_to_le32(domain_id); - cmd->flags = cpu_to_le32(flags); cmd->voltage_level = cpu_to_le32(volt_uV); - ret = ph->xops->do_xfer(ph, t); + if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) { + cmd->flags = cpu_to_le32(0x0); + ret = ph->xops->do_xfer(ph, t); + } else { + cmd->flags = cpu_to_le32(0x1); + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_resp_voltage_level_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain_id) == domain_id) + dev_dbg(ph->dev, + "Voltage domain %d set async to %d\n", + v->id, + le32_to_cpu(resp->voltage_level)); + else + ret = -EPROTO; + } + } ph->xops->xfer_put(ph, t); return ret; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index ddf0b9ff9e15..435d0e2658a4 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) info->firmware_version = le32_to_cpu(caps.platform_version); } /* Ignore error if not implemented */ - if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + if (info->is_legacy && ret == -EOPNOTSUPP) return 0; return ret; @@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev) struct resource res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + struct scpi_drvinfo *scpi_drvinfo; - scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); - if (!scpi_info) + scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); + if (!scpi_drvinfo) return -ENOMEM; if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_info->is_legacy = true; + scpi_drvinfo->is_legacy = true; count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), - GFP_KERNEL); - if (!scpi_info->channels) + scpi_drvinfo->channels = + devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); + if (!scpi_drvinfo->channels) return -ENOMEM; - ret = devm_add_action(dev, scpi_free_channels, scpi_info); + ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); if (ret) return ret; - for (; scpi_info->num_chans < count; scpi_info->num_chans++) { + for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { resource_size_t size; - int idx = 
scpi_info->num_chans; - struct scpi_chan *pchan = scpi_info->channels + idx; + int idx = scpi_drvinfo->num_chans; + struct scpi_chan *pchan = scpi_drvinfo->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - scpi_info->commands = scpi_std_commands; + scpi_drvinfo->commands = scpi_std_commands; - platform_set_drvdata(pdev, scpi_info); + platform_set_drvdata(pdev, scpi_drvinfo); - if (scpi_info->is_legacy) { + if (scpi_drvinfo->is_legacy) { /* Replace with legacy variants */ scpi_ops.clk_set_val = legacy_scpi_clk_set_val; - scpi_info->commands = scpi_legacy_commands; + scpi_drvinfo->commands = scpi_legacy_commands; /* Fill priority bitmap */ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) set_bit(legacy_hpriority_cmds[idx], - scpi_info->cmd_priority); + scpi_drvinfo->cmd_priority); } - ret = scpi_init_versions(scpi_info); + scpi_info = scpi_drvinfo; + + ret = scpi_init_versions(scpi_drvinfo); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); + scpi_info = NULL; return ret; } - if (scpi_info->is_legacy && !scpi_info->protocol_version && - !scpi_info->firmware_version) + if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version && + !scpi_drvinfo->firmware_version) dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); else dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", FIELD_GET(PROTO_REV_MAJOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(PROTO_REV_MINOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(FW_REV_MAJOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_MINOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_PATCH_MASK, - scpi_info->firmware_version)); - scpi_info->scpi_ops = &scpi_ops; + scpi_drvinfo->firmware_version)); + + scpi_drvinfo->scpi_ops = &scpi_ops; - return devm_of_platform_populate(dev); + ret = devm_of_platform_populate(dev); + if (ret) + scpi_info = NULL; + + return ret; } static const struct of_device_id scpi_of_match[] = { diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index a7e762c352f9..1e1a51510e83 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1059,14 +1059,14 @@ static bool __init sdei_present_acpi(void) return true; } -static int __init sdei_init(void) +void __init sdei_init(void) { struct platform_device *pdev; int ret; ret = platform_driver_register(&sdei_driver); if (ret || !sdei_present_acpi()) - return ret; + return; pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL, 0); @@ -1076,17 +1076,8 @@ static int __init sdei_init(void) pr_info("Failed to register ACPI:SDEI platform device %d\n", ret); } - - return ret; } -/* - * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register - * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised - * by device_initcall(). We want to be called in the middle. 
- */ -subsys_initcall_sync(sdei_init); - int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index a5bf4c3f6dc7..40e3183a3d11 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -197,7 +197,7 @@ static int tee_bnxt_fw_probe(struct device *dev) return -ENODEV; /* Open session with Bnxt load Trusted App */ - memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN); + export_uuid(sess_arg.uuid, &bnxt_device->id.uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 948dd8382686..81cc3d0f6eec 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -12,16 +12,10 @@ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> -#include <linux/device.h> -#include <linux/firmware.h> -#include <linux/interrupt.h> -#include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/regmap.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/workqueue.h> #include <linux/firmware/cirrus/cs_dsp.h> #include <linux/firmware/cirrus/wmfw.h> @@ -622,7 +616,8 @@ static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp) offs[0], offs[1], offs[2], offs[3]); } -static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg) +static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg, + unsigned int off) { const struct cs_dsp_alg_region *alg_region = &ctl->alg_region; struct cs_dsp *dsp = ctl->dsp; @@ -635,7 +630,7 @@ static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg return -EINVAL; } - *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset); + *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off); return 0; } @@ -659,10 +654,12 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int unsigned int reg; int i, ret; + lockdep_assert_held(&dsp->pwr_lock); + if (!dsp->running) return -EPERM; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, 0); if (ret) return ret; @@ -716,14 +713,14 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control); static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, - const void *buf, size_t len) + unsigned int off, const void *buf, size_t len) { struct cs_dsp *dsp = ctl->dsp; void *scratch; int ret; unsigned int reg; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, off); if (ret) return ret; @@ -749,38 +746,49 @@ static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, /** * cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control * @ctl: pointer to coefficient control + * @off: word offset at which data should be written * @buf: the buffer to write to the given control - * @len: the length of the buffer + * @len: the length of the buffer in bytes * * Must be called with pwr_lock held. * * Return: Zero for success, a negative number on error. 
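 *
 * Example (editorial sketch, not from the original patch; assumes a control
 * previously looked up with cs_dsp_get_ctl() and a running @dsp):
 *
 *	__be32 val = cpu_to_be32(1);
 *	int ret;
 *
 *	mutex_lock(&dsp->pwr_lock);
 *	ret = cs_dsp_coeff_write_ctrl(ctl, 0, &val, sizeof(val));
 *	mutex_unlock(&dsp->pwr_lock);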
*/ -int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len) +int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, const void *buf, size_t len) { int ret = 0; + if (!ctl) + return -ENOENT; + + lockdep_assert_held(&ctl->dsp->pwr_lock); + + if (len + off * sizeof(u32) > ctl->len) + return -EINVAL; + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) ret = -EPERM; else if (buf != ctl->cache) - memcpy(ctl->cache, buf, len); + memcpy(ctl->cache + off * sizeof(u32), buf, len); ctl->set = 1; if (ctl->enabled && ctl->dsp->running) - ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len); + ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len); return ret; } EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl); -static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len) +static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) { struct cs_dsp *dsp = ctl->dsp; void *scratch; int ret; unsigned int reg; - ret = cs_dsp_coeff_base_reg(ctl, ®); + ret = cs_dsp_coeff_base_reg(ctl, ®, off); if (ret) return ret; @@ -806,28 +814,38 @@ static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, s /** * cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer * @ctl: pointer to coefficient control + * @off: word offset at which data should be read * @buf: the buffer to store to the given control - * @len: the length of the buffer + * @len: the length of the buffer in bytes * * Must be called with pwr_lock held. * * Return: Zero for success, a negative number on error. */ -int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len) +int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) { int ret = 0; + if (!ctl) + return -ENOENT; + + lockdep_assert_held(&ctl->dsp->pwr_lock); + + if (len + off * sizeof(u32) > ctl->len) + return -EINVAL; + if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) { if (ctl->enabled && ctl->dsp->running) - return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len); + return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len); else return -EPERM; } else { if (!ctl->flags && ctl->enabled && ctl->dsp->running) - ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len); + ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (buf != ctl->cache) - memcpy(buf, ctl->cache, len); + memcpy(buf, ctl->cache + off * sizeof(u32), len); } return ret; @@ -851,7 +869,7 @@ static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp) * created so we don't need to do anything. 
*/ if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) { - ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len); + ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (ret < 0) return ret; } @@ -869,7 +887,7 @@ static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp) if (!ctl->enabled) continue; if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) { - ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache, + ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache, ctl->len); if (ret < 0) return ret; @@ -937,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp, ctl->alg_region = *alg_region; if (subname && dsp->fw_ver >= 2) { ctl->subname_len = subname_len; - ctl->subname = kmemdup(subname, - strlen(subname) + 1, GFP_KERNEL); + ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname); if (!ctl->subname) { ret = -ENOMEM; goto err_ctl; @@ -1159,6 +1176,7 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, return -EINVAL; break; case WMFW_CTL_TYPE_HOSTEVENT: + case WMFW_CTL_TYPE_FWEVENT: ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk, WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | @@ -1459,6 +1477,8 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in { struct cs_dsp_coeff_ctl *pos, *rslt = NULL; + lockdep_assert_held(&dsp->pwr_lock); + list_for_each_entry(pos, &dsp->ctl_list, list) { if (!pos->subname) continue; @@ -1554,6 +1574,8 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, { struct cs_dsp_alg_region *alg_region; + lockdep_assert_held(&dsp->pwr_lock); + list_for_each_entry(alg_region, &dsp->alg_regions, list) { if (id == alg_region->alg && type == alg_region->type) return alg_region; @@ -1565,7 +1587,7 @@ EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region); static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, int type, __be32 id, - __be32 base) + __be32 ver, __be32 base) { struct cs_dsp_alg_region *alg_region; @@ -1575,6 +1597,7 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp, alg_region->type = type; alg_region->alg = be32_to_cpu(id); + alg_region->ver = be32_to_cpu(ver); alg_region->base = be32_to_cpu(base); list_add_tail(&alg_region->list, &dsp->alg_regions); @@ -1624,14 +1647,14 @@ static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp, nalgs); } -static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions, - const int *type, __be32 *base) +static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver, + int nregions, const int *type, __be32 *base) { struct cs_dsp_alg_region *alg_region; int i; for (i = 0; i < nregions; i++) { - alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]); + alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); } @@ -1666,12 +1689,14 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM, - adsp1_id.fw.id, adsp1_id.zm); + adsp1_id.fw.id, adsp1_id.fw.ver, + adsp1_id.zm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM, - adsp1_id.fw.id, adsp1_id.dm); + adsp1_id.fw.id, adsp1_id.fw.ver, + adsp1_id.dm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); @@ -1694,6 +1719,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM, adsp1_alg[i].alg.id, + adsp1_alg[i].alg.ver, adsp1_alg[i].dm); if 
(IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1715,6 +1741,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM, adsp1_alg[i].alg.id, + adsp1_alg[i].alg.ver, adsp1_alg[i].zm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1767,17 +1794,20 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM, - adsp2_id.fw.id, adsp2_id.xm); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.xm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM, - adsp2_id.fw.id, adsp2_id.ym); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.ym); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM, - adsp2_id.fw.id, adsp2_id.zm); + adsp2_id.fw.id, adsp2_id.fw.ver, + adsp2_id.zm); if (IS_ERR(alg_region)) return PTR_ERR(alg_region); @@ -1802,6 +1832,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].xm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1823,6 +1854,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].ym); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1844,6 +1876,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp) alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM, adsp2_alg[i].alg.id, + adsp2_alg[i].alg.ver, adsp2_alg[i].zm); if (IS_ERR(alg_region)) { ret = PTR_ERR(alg_region); @@ -1869,7 +1902,7 @@ out: return ret; } -static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, +static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver, __be32 xm_base, __be32 ym_base) { static const int types[] = { @@ -1878,7 +1911,7 @@ static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, }; __be32 bases[] = { xm_base, xm_base, ym_base, ym_base }; - return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases); + return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases); } static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) @@ -1906,7 +1939,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs); - ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, + ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver, halo_id.xm_base, halo_id.ym_base); if (ret) return ret; @@ -1930,6 +1963,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp) be32_to_cpu(halo_alg[i].ym_base)); ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id, + halo_alg[i].alg.ver, halo_alg[i].xm_base, halo_alg[i].ym_base); if (ret) @@ -1951,7 +1985,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware const struct cs_dsp_region *mem; struct cs_dsp_alg_region *alg_region; const char *region_name; - int ret, pos, blocks, type, offset, reg; + int ret, pos, blocks, type, offset, reg, version; + char *text = NULL; struct cs_dsp_buf *buf; if (!firmware) @@ -1973,6 +2008,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware switch (be32_to_cpu(hdr->rev) & 0xff) { case 1: + case 2: break; default: cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n", @@ -1995,6 +2031,7 @@ static int 
cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware type = le16_to_cpu(blk->type); offset = le16_to_cpu(blk->offset); + version = le32_to_cpu(blk->ver) >> 8; cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n", file, blocks, le32_to_cpu(blk->id), @@ -2008,6 +2045,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware region_name = "Unknown"; switch (type) { case (WMFW_NAME_TEXT << 8): + text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL); + break; case (WMFW_INFO_TEXT << 8): case (WMFW_METADATA << 8): break; @@ -2052,6 +2091,16 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware alg_region = cs_dsp_find_alg_region(dsp, type, le32_to_cpu(blk->id)); if (alg_region) { + if (version != alg_region->ver) + cs_dsp_warn(dsp, + "Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n", + (version >> 16) & 0xFF, + (version >> 8) & 0xFF, + version & 0xFF, + (alg_region->ver >> 16) & 0xFF, + (alg_region->ver >> 8) & 0xFF, + alg_region->ver & 0xFF); + reg = alg_region->base; reg = dsp->ops->region_to_reg(mem, reg); reg += offset; @@ -2067,6 +2116,13 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware break; } + if (text) { + memcpy(text, blk->data, le32_to_cpu(blk->len)); + cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text); + kfree(text); + text = NULL; + } + if (reg) { if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { @@ -2117,6 +2173,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware out_fw: regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); + kfree(text); return ret; } @@ -2600,6 +2657,12 @@ int cs_dsp_run(struct cs_dsp *dsp) goto err; } + if (dsp->client_ops->pre_run) { + ret = dsp->client_ops->pre_run(dsp); + if (ret) + goto err; + } + /* Sync set controls */ ret = cs_dsp_coeff_sync_controls(dsp); if (ret != 0) @@ -2662,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp) mutex_lock(&dsp->pwr_lock); + if (dsp->client_ops->pre_stop) + dsp->client_ops->pre_stop(dsp); + dsp->running = false; if (dsp->ops->stop_core) @@ -2680,10 +2746,16 @@ EXPORT_SYMBOL_GPL(cs_dsp_stop); static int cs_dsp_halo_start_core(struct cs_dsp *dsp) { - return regmap_update_bits(dsp->regmap, - dsp->base + HALO_CCM_CORE_CONTROL, - HALO_CORE_RESET | HALO_CORE_EN, - HALO_CORE_RESET | HALO_CORE_EN); + int ret; + + ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL, + HALO_CORE_RESET | HALO_CORE_EN, + HALO_CORE_RESET | HALO_CORE_EN); + if (ret) + return ret; + + return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL, + HALO_CORE_RESET, 0); } static void cs_dsp_halo_stop_core(struct cs_dsp *dsp) @@ -2789,6 +2861,8 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me unsigned int reg; int ret; + lockdep_assert_held(&dsp->pwr_lock); + if (!mem) return -EINVAL; @@ -2842,6 +2916,8 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad __be32 val = cpu_to_be32(data & 0x00ffffffu); unsigned int reg; + lockdep_assert_held(&dsp->pwr_lock); + if (!mem) return -EINVAL; @@ -3104,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = { .stop_core = cs_dsp_halo_stop_core, }; +/** + * cs_dsp_chunk_write() - Format data to a DSP memory chunk + * @ch: Pointer to the chunk structure + * @nbits: Number of bits to write + * @val: Value to write + * + * This function sequentially writes values into the format required for DSP + * memory, it handles both inserting of the padding 
bytes and converting to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+	int nwrite, i;
+
+	nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+	ch->cache <<= nwrite;
+	ch->cache |= val >> (nbits - nwrite);
+	ch->cachebits += nwrite;
+	nbits -= nwrite;
+
+	if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+		if (cs_dsp_chunk_end(ch))
+			return -ENOSPC;
+
+		ch->cache &= 0xFFFFFF;
+		for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+			*ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+		ch->bytes += sizeof(ch->cache);
+		ch->cachebits = 0;
+	}
+
+	if (nbits)
+		return cs_dsp_chunk_write(ch, nbits, val);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write() only writes data when a whole DSP word is ready to
+ * be written out, it is possible that some data will remain in the cache;
+ * this function pads that data with zeros up to a whole DSP word and writes
+ * it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+	if (!ch->cachebits)
+		return 0;
+
+	return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removing the padding bytes and converting from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
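+ *
+ * Example (editorial sketch, not from the original patch; assumes the
+ * cs_dsp_chunk() initializer from <linux/firmware/cirrus/cs_dsp.h>):
+ *
+ *	u8 buf[8];
+ *	struct cs_dsp_chunk wr = cs_dsp_chunk(buf, sizeof(buf));
+ *	struct cs_dsp_chunk rd = cs_dsp_chunk(buf, sizeof(buf));
+ *
+ *	cs_dsp_chunk_write(&wr, 12, 0xABC);
+ *	cs_dsp_chunk_write(&wr, 12, 0x123);
+ *	cs_dsp_chunk_flush(&wr);
+ *
+ *	cs_dsp_chunk_read(&rd, 12);
+ *
+ * Here the two 12-bit writes pack into a single 24-bit DSP word (stored as
+ * four bytes, including the padding byte), the flush is a no-op since the
+ * word is already complete, and the read returns 0xABC.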
+ */ +int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits) +{ + int nread, i; + u32 result; + + if (!ch->cachebits) { + if (cs_dsp_chunk_end(ch)) + return -ENOSPC; + + ch->cache = 0; + ch->cachebits = CS_DSP_DATA_WORD_BITS; + + for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE) + ch->cache |= *ch->data++; + + ch->bytes += sizeof(ch->cache); + } + + nread = min(ch->cachebits, nbits); + nbits -= nread; + + result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread); + ch->cache <<= nread; + ch->cachebits -= nread; + + if (nbits) + result = (result << nbits) | cs_dsp_chunk_read(ch, nbits); + + return result; +} +EXPORT_SYMBOL_GPL(cs_dsp_chunk_read); + MODULE_DESCRIPTION("Cirrus Logic DSP Support"); MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index 8b8127fa8955..66727ad3361b 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -302,12 +302,12 @@ static struct attribute *dmi_sysfs_sel_attrs[] = { &dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr, NULL, }; - +ATTRIBUTE_GROUPS(dmi_sysfs_sel); static struct kobj_type dmi_system_event_log_ktype = { .release = dmi_entry_free, .sysfs_ops = &dmi_sysfs_specialize_attr_ops, - .default_attrs = dmi_sysfs_sel_attrs, + .default_groups = dmi_sysfs_sel_groups, }; typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel, @@ -518,6 +518,7 @@ static struct attribute *dmi_sysfs_entry_attrs[] = { &dmi_sysfs_attr_entry_position.attr, NULL, }; +ATTRIBUTE_GROUPS(dmi_sysfs_entry); static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry, const struct dmi_header *dh, @@ -565,7 +566,7 @@ static void dmi_sysfs_entry_release(struct kobject *kobj) static struct kobj_type dmi_sysfs_entry_ktype = { .release = dmi_sysfs_entry_release, .sysfs_ops = &dmi_sysfs_attr_ops, - .default_attrs = dmi_sysfs_entry_attrs, + .default_groups = dmi_sysfs_entry_groups, }; static struct kset *dmi_kset; @@ -603,7 +604,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, "%d-%d", dh->type, entry->instance); if (*ret) { - kfree(entry); + kobject_put(&entry->kobj); return; } diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index f191a1f901ac..015c95a825d3 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -567,8 +567,13 @@ static int __init dmi_present(const u8 *buf) { u32 smbios_ver; + /* + * The size of this structure is 31 bytes, but we also accept value + * 30 due to a mistake in SMBIOS specification version 2.1. 
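+	 *
+	 * (For reference: buf[5] is the entry point length range-checked
+	 * here, while buf[6] and buf[7] carry the SMBIOS major and minor
+	 * version numbers that get packed into smbios_ver below.)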
+	 */
 	if (memcmp(buf, "_SM_", 4) == 0 &&
-	    buf[5] < 32 && dmi_checksum(buf, buf[5])) {
+	    buf[5] >= 30 && buf[5] <= 32 &&
+	    dmi_checksum(buf, buf[5])) {
 		smbios_ver = get_unaligned_be16(buf + 6);
 		smbios_entry_point_size = buf[5];
 		memcpy(smbios_entry_point, buf, smbios_entry_point_size);
@@ -629,8 +634,9 @@ static int __init dmi_present(const u8 *buf)
 static int __init dmi_smbios3_present(const u8 *buf)
 {
 	if (memcmp(buf, "_SM3_", 5) == 0 &&
-	    buf[6] < 32 && dmi_checksum(buf, buf[6])) {
-		dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+	    buf[6] >= 24 && buf[6] <= 32 &&
+	    dmi_checksum(buf, buf[6])) {
+		dmi_ver = get_unaligned_be24(buf + 7);
 		dmi_num = 0;			/* No longer specified */
 		dmi_len = get_unaligned_le32(buf + 12);
 		dmi_base = get_unaligned_le64(buf + 16);
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 14d0970a7198..5cc238916551 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -574,14 +574,6 @@ static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
 static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
 static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
-
-/* These are default attributes that are added for every edd
- * device discovered. There are none.
- */
-static struct attribute * def_attrs[] = {
-	NULL,
-};
-
 /* These attributes are conditional and only added for some devices. */
 static struct edd_attribute * edd_attrs[] = {
 	&edd_attr_raw_data,
@@ -619,7 +611,6 @@ static void edd_release(struct kobject * kobj)
 static struct kobj_type edd_ktype = {
 	.release	= edd_release,
 	.sysfs_ops	= &edd_attr_ops,
-	.default_attrs	= def_attrs,
 };
 
 static struct kset *edd_kset;
@@ -694,8 +685,7 @@ static void edd_populate_dir(struct edd_device * edev)
 	int i;
 
 	for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
-		if (!attr->test ||
-		    (attr->test && attr->test(edev)))
+		if (!attr->test || attr->test(edev))
 			error = sysfs_create_file(&edev->kobj,&attr->attr);
 	}
 
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2c3dac5ecb36..6787ed8dfacf 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -2,18 +2,6 @@
 menu "EFI (Extensible Firmware Interface) Support"
 	depends on EFI
 
-config EFI_VARS
-	tristate "EFI Variable Support via sysfs"
-	depends on EFI && (X86 || IA64)
-	default n
-	help
-	  If you say Y here, you are able to get EFI (Extensible Firmware
-	  Interface) variable information via sysfs.  You may read,
-	  write, create, and destroy EFI variables through this interface.
-	  Note that this driver is only retained for compatibility with
-	  legacy users: new users should use the efivarfs filesystem
-	  instead.
-
 config EFI_ESRT
 	bool
 	depends on EFI && !IA64
@@ -22,6 +10,7 @@ config EFI_ESRT
 config EFI_VARS_PSTORE
 	tristate "Register efivars backend for pstore"
 	depends on PSTORE
+	select UCS2_STRING
 	default y
 	help
 	  Say Y here to enable use of efivars as a backend to pstore. This
@@ -91,6 +80,18 @@ config EFI_SOFT_RESERVE
 
 	  If unsure, say Y.
 
+config EFI_DXE_MEM_ATTRIBUTES
+	bool "Adjust memory attributes in EFISTUB"
+	depends on EFI && EFI_STUB && X86
+	default y
+	help
+	  The UEFI specification does not guarantee that all memory is
+	  accessible for both write and execute, as the kernel expects it
+	  to be. Use DXE services to check and alter memory protection
+	  attributes during boot via EFISTUB, to ensure that memory ranges
+	  used by the kernel are writable and executable.
+
 config EFI_PARAMS_FROM_FDT
 	bool
 	help
@@ -104,9 +105,28 @@ config EFI_RUNTIME_WRAPPERS
 config EFI_GENERIC_STUB
 	bool
 
+config EFI_ZBOOT
+	bool "Enable the generic EFI decompressor"
+	depends on EFI_GENERIC_STUB && !ARM
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_LZ4
+	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
+	select HAVE_KERNEL_XZ
+	select HAVE_KERNEL_ZSTD
+	help
+	  Create the bootable image as an EFI application that carries the
+	  actual kernel image in compressed form, and decompresses it into
+	  memory before executing it via LoadImage/StartImage EFI boot service
+	  calls. For compatibility with non-EFI loaders, the payload can be
+	  decompressed and executed by the loader as well, provided that the
+	  loader implements the decompression algorithm and that non-EFI boot
+	  is supported by the encapsulated image. (The compression algorithm
+	  used is described in the zboot image header.)
+
 config EFI_ARMSTUB_DTB_LOADER
 	bool "Enable the DTB loader"
-	depends on EFI_GENERIC_STUB && !RISCV
+	depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
 	default y
 	help
 	  Select this config option to add support for the dtb= command
@@ -123,7 +143,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
 	bool "Enable the command line initrd loader" if !X86
 	depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
 	default y if X86
-	depends on !RISCV
+	depends on !RISCV && !LOONGARCH
 	help
 	  Select this config option to add support for the initrd= command
 	  line parameter, allowing an initrd that resides on the same volume
@@ -133,6 +153,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
 
 config EFI_BOOTLOADER_CONTROL
 	tristate "EFI Bootloader Control"
+	select UCS2_STRING
 	default n
 	help
 	  This module installs a reboot hook, such that if reboot() is
@@ -181,6 +202,9 @@ config EFI_TEST
 	  Say Y here to enable the runtime services support via /dev/efi_test.
 	  If unsure, say N.
 
+config EFI_DEV_PATH_PARSER
+	bool
+
 config APPLE_PROPERTIES
 	bool "Apple Device Properties"
 	depends on EFI_STUB && X86
@@ -243,13 +267,59 @@ config EFI_DISABLE_PCI_DMA
 	  options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
 	  may be used to override this option.
 
-endmenu
+config EFI_EARLYCON
+	def_bool y
+	depends on SERIAL_EARLYCON && !ARM && !IA64
+	select FONT_SUPPORT
+	select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+	bool "Load custom ACPI SSDT overlay from an EFI variable"
+	depends on ACPI
+	default ACPI_TABLE_UPGRADE
+	help
+	  Allow loading of an ACPI SSDT overlay from an EFI variable specified
+	  by a kernel command line option.
+
+	  See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+	  information.
+
+config EFI_DISABLE_RUNTIME
+	bool "Disable EFI runtime services support by default"
+	default y if PREEMPT_RT
+	help
+	  Allow disabling EFI runtime services support by default. This can
+	  already be achieved by using the efi=noruntime option, but it could
+	  be useful to have this default without any kernel command line
+	  parameter.
+
+	  The EFI runtime services are disabled by default when PREEMPT_RT is
+	  enabled, because measurements have shown that some EFI function
+	  calls might take too much time to complete, causing large latencies,
+	  which is an issue for Real-Time kernels.
+
+	  This default can be overridden by using the efi=runtime option.
+
+config EFI_COCO_SECRET
+	bool "EFI Confidential Computing Secret Area Support"
+	help
+	  Confidential Computing platforms (such as AMD SEV) allow the
+	  Guest Owner to securely inject secrets during guest VM launch.
+ The secrets are placed in a designated EFI reserved memory area. + + In order to use the secrets in the kernel, the location of the secret + area (as published in the EFI config table) must be kept. + + If you say Y here, the address of the EFI secret area will be kept + for usage inside the kernel. This will allow the + virt/coco/efi_secret module to access the secrets, which in turn + allows userspace programs to access the injected secrets. config EFI_EMBEDDED_FIRMWARE bool - depends on EFI select CRYPTO_LIB_SHA256 +endmenu + config UEFI_CPER bool @@ -262,25 +332,3 @@ config UEFI_CPER_X86 bool depends on UEFI_CPER && X86 default y - -config EFI_DEV_PATH_PARSER - bool - depends on ACPI - default n - -config EFI_EARLYCON - def_bool y - depends on EFI && SERIAL_EARLYCON && !ARM && !IA64 - select FONT_SUPPORT - select ARCH_USE_MEMREMAP_PROT - -config EFI_CUSTOM_SSDT_OVERLAYS - bool "Load custom ACPI SSDT overlay from an EFI variable" - depends on EFI && ACPI - default ACPI_TABLE_UPGRADE - help - Allow loading of an ACPI SSDT overlay from an EFI variable specified - by a kernel command line option. - - See Documentation/admin-guide/acpi/ssdt-overlays.rst for more - information. diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index c02ff25dd477..8d151e332584 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -17,7 +17,6 @@ ifneq ($(CONFIG_EFI_CAPSULE_LOADER),) obj-$(CONFIG_EFI) += capsule.o endif obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o -obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_ESRT) += esrt.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 4c3201e290e2..ea84108035eb 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -24,7 +24,7 @@ static bool dump_properties __initdata; static int __init dump_properties_enable(char *arg) { dump_properties = true; - return 0; + return 1; } __setup("dump_apple_properties", dump_properties_enable); diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 3359ae2adf24..7c48c380d722 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -63,7 +63,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 4dde8edd53b6..3e8d4b51a814 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -243,29 +243,6 @@ failed: } /** - * efi_capsule_flush - called by file close or file flush - * @file: file pointer - * @id: not used - * - * If a capsule is being partially uploaded then calling this function - * will be treated as upload termination and will free those completed - * buffer pages and -ECANCELED will be returned. 
- **/ -static int efi_capsule_flush(struct file *file, fl_owner_t id) -{ - int ret = 0; - struct capsule_info *cap_info = file->private_data; - - if (cap_info->index > 0) { - pr_err("capsule upload not complete\n"); - efi_free_all_buff_pages(cap_info); - ret = -ECANCELED; - } - - return ret; -} - -/** * efi_capsule_release - called by file close * @inode: not used * @file: file pointer @@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) { struct capsule_info *cap_info = file->private_data; + if (cap_info->index > 0 && + (cap_info->header.headersize == 0 || + cap_info->count < cap_info->total_size)) { + pr_err("capsule upload not complete\n"); + efi_free_all_buff_pages(cap_info); + } + kfree(cap_info->pages); kfree(cap_info->phys); kfree(file->private_data); @@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = { .owner = THIS_MODULE, .open = efi_capsule_open, .write = efi_capsule_write, - .flush = efi_capsule_flush, .release = efi_capsule_release, .llseek = no_llseek, }; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 6ec8edec6329..e4e5ea7ce910 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -211,7 +211,33 @@ const char *cper_mem_err_type_str(unsigned int etype) } EXPORT_SYMBOL_GPL(cper_mem_err_type_str); -static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) +const char *cper_mem_err_status_str(u64 status) +{ + switch ((status >> 8) & 0xff) { + case 1: return "Error detected internal to the component"; + case 4: return "Storage error in DRAM memory"; + case 5: return "Storage error in TLB"; + case 6: return "Storage error in cache"; + case 7: return "Error in one or more functional units"; + case 8: return "Component failed self test"; + case 9: return "Overflow or undervalue of internal queue"; + case 16: return "Error detected in the bus"; + case 17: return "Virtual address not found on IO-TLB or IO-PDIR"; + case 18: return "Improper access error"; + case 19: return "Access to a memory address which is not mapped to any component"; + case 20: return "Loss of Lockstep"; + case 21: return "Response not associated with a request"; + case 22: return "Bus parity error - must also set the A, C, or D Bits"; + case 23: return "Detection of a protocol error"; + case 24: return "Detection of a PATH_ERROR"; + case 25: return "Bus operation timeout"; + case 26: return "A read was issued to data that has been poisoned"; + default: return "Reserved"; + } +} +EXPORT_SYMBOL_GPL(cper_mem_err_status_str); + +int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) { u32 len, n; @@ -221,51 +247,51 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg) n = 0; len = CPER_REC_LEN; if (mem->validation_bits & CPER_MEM_VALID_NODE) - n += scnprintf(msg + n, len - n, "node: %d ", mem->node); + n += scnprintf(msg + n, len - n, "node:%d ", mem->node); if (mem->validation_bits & CPER_MEM_VALID_CARD) - n += scnprintf(msg + n, len - n, "card: %d ", mem->card); + n += scnprintf(msg + n, len - n, "card:%d ", mem->card); if (mem->validation_bits & CPER_MEM_VALID_MODULE) - n += scnprintf(msg + n, len - n, "module: %d ", mem->module); + n += scnprintf(msg + n, len - n, "module:%d ", mem->module); if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER) - n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank); + n += scnprintf(msg + n, len - n, "rank:%d ", mem->rank); if (mem->validation_bits & CPER_MEM_VALID_BANK) - n += scnprintf(msg + n, 
len - n, "bank: %d ", mem->bank); + n += scnprintf(msg + n, len - n, "bank:%d ", mem->bank); if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP) - n += scnprintf(msg + n, len - n, "bank_group: %d ", + n += scnprintf(msg + n, len - n, "bank_group:%d ", mem->bank >> CPER_MEM_BANK_GROUP_SHIFT); if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS) - n += scnprintf(msg + n, len - n, "bank_address: %d ", + n += scnprintf(msg + n, len - n, "bank_address:%d ", mem->bank & CPER_MEM_BANK_ADDRESS_MASK); if (mem->validation_bits & CPER_MEM_VALID_DEVICE) - n += scnprintf(msg + n, len - n, "device: %d ", mem->device); + n += scnprintf(msg + n, len - n, "device:%d ", mem->device); if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) { u32 row = mem->row; row |= cper_get_mem_extension(mem->validation_bits, mem->extended); - n += scnprintf(msg + n, len - n, "row: %d ", row); + n += scnprintf(msg + n, len - n, "row:%d ", row); } if (mem->validation_bits & CPER_MEM_VALID_COLUMN) - n += scnprintf(msg + n, len - n, "column: %d ", mem->column); + n += scnprintf(msg + n, len - n, "column:%d ", mem->column); if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION) - n += scnprintf(msg + n, len - n, "bit_position: %d ", + n += scnprintf(msg + n, len - n, "bit_position:%d ", mem->bit_pos); if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID) - n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ", + n += scnprintf(msg + n, len - n, "requestor_id:0x%016llx ", mem->requestor_id); if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID) - n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ", + n += scnprintf(msg + n, len - n, "responder_id:0x%016llx ", mem->responder_id); if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) - n += scnprintf(msg + n, len - n, "target_id: 0x%016llx ", + n += scnprintf(msg + n, len - n, "target_id:0x%016llx ", mem->target_id); if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID) - n += scnprintf(msg + n, len - n, "chip_id: %d ", + n += scnprintf(msg + n, len - n, "chip_id:%d ", mem->extended >> CPER_MEM_CHIP_ID_SHIFT); return n; } -static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) +int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) { u32 len, n; const char *bank = NULL, *device = NULL; @@ -334,7 +360,9 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, return; } if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) - printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); + printk("%s error_status: %s (0x%016llx)\n", + pfx, cper_mem_err_status_str(mem->error_status), + mem->error_status); if (mem->validation_bits & CPER_MEM_VALID_PA) printk("%s""physical_address: 0x%016llx\n", pfx, mem->physical_addr); diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index eb9c65f97841..f80d87c199c3 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -15,9 +15,11 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { - char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */ struct acpi_device *adev; struct device *phys_dev; + char hid[ACPI_ID_LEN]; + u64 uid; + int ret; if (node->header.length != 12) return -EINVAL; @@ -27,12 +29,12 @@ static long __init parse_acpi_path(const struct efi_dev_path *node, 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1, 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1, node->acpi.hid >> 16); - 
sprintf(uid, "%u", node->acpi.uid); for_each_acpi_dev_match(adev, hid, NULL, -1) { - if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid)) + ret = acpi_dev_uid_to_integer(adev, &uid); + if (ret == 0 && node->acpi.uid == uid) break; - if (!adev->pnp.unique_id && node->acpi.uid == 0) + if (ret == -ENODATA && node->acpi.uid == 0) break; } if (!adev) diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index b19ce1a83f91..2fd770b499a3 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -51,34 +51,10 @@ static phys_addr_t __init efi_to_phys(unsigned long addr) return addr; } -static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR; -static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR; - -static const efi_config_table_type_t arch_tables[] __initconst = { - {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table}, - {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table}, - {} -}; +extern __weak const efi_config_table_type_t efi_arch_tables[]; static void __init init_screen_info(void) { - struct screen_info *si; - - if (IS_ENABLED(CONFIG_ARM) && - screen_info_table != EFI_INVALID_TABLE_ADDR) { - si = early_memremap_ro(screen_info_table, sizeof(*si)); - if (!si) { - pr_err("Could not map screen_info config table\n"); - return; - } - screen_info = *si; - early_memunmap(si, sizeof(*si)); - - /* dummycon on ARM needs non-zero values for columns/lines */ - screen_info.orig_video_cols = 80; - screen_info.orig_video_lines = 25; - } - if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && memblock_is_map_memory(screen_info.lfb_base)) memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size); @@ -119,8 +95,7 @@ static int __init uefi_init(u64 efi_system_table) goto out; } retval = efi_config_parse_tables(config_tables, systab->nr_tables, - IS_ENABLED(CONFIG_ARM) ? arch_tables - : NULL); + efi_arch_tables); early_memunmap(config_tables, table_size); out: @@ -235,6 +210,12 @@ void __init efi_init(void) } reserve_regions(); + /* + * For memblock manipulation, the cap should come after the memblock_add(). + * And now, memblock is fully populated, it is time to do capping. 
+ */ + early_init_dt_check_for_usable_mem_range(); + efi_find_mirror(); efi_esrt_init(); efi_mokvar_table_init(); @@ -242,36 +223,4 @@ void __init efi_init(void) PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK))); init_screen_info(); - -#ifdef CONFIG_ARM - /* ARM does not permit early mappings to persist across paging_init() */ - efi_memmap_unmap(); - - if (cpu_state_table != EFI_INVALID_TABLE_ADDR) { - struct efi_arm_entry_state *state; - bool dump_state = true; - - state = early_memremap_ro(cpu_state_table, - sizeof(struct efi_arm_entry_state)); - if (state == NULL) { - pr_warn("Unable to map CPU entry state table.\n"); - return; - } - - if ((state->sctlr_before_ebs & 1) == 0) - pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n"); - else if ((state->sctlr_after_ebs & 1) == 0) - pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n"); - else - dump_state = false; - - if (dump_state || efi_enabled(EFI_DBG)) { - pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs); - pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs); - pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs); - pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs); - } - early_memunmap(state, sizeof(struct efi_arm_entry_state)); - } -#endif } diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0ef086e43090..3bddc152fcd4 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -6,6 +6,8 @@ #include <linux/slab.h> #include <linux/ucs2_string.h> +MODULE_IMPORT_NS(EFIVAR); + #define DUMP_NAME_LEN 66 #define EFIVARS_DATA_SIZE_MAX 1024 @@ -20,18 +22,25 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); EFI_VARIABLE_BOOTSERVICE_ACCESS | \ EFI_VARIABLE_RUNTIME_ACCESS) -static LIST_HEAD(efi_pstore_list); -static DECLARE_WORK(efivar_work, NULL); - static int efi_pstore_open(struct pstore_info *psi) { - psi->data = NULL; + int err; + + err = efivar_lock(); + if (err) + return err; + + psi->data = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); + if (!psi->data) + return -ENOMEM; + return 0; } static int efi_pstore_close(struct pstore_info *psi) { - psi->data = NULL; + efivar_unlock(); + kfree(psi->data); return 0; } @@ -40,22 +49,17 @@ static inline u64 generic_id(u64 timestamp, unsigned int part, int count) return (timestamp * 100 + part) * 1000 + count; } -static int efi_pstore_read_func(struct efivar_entry *entry, - struct pstore_record *record) +static int efi_pstore_read_func(struct pstore_record *record, + efi_char16_t *varname) { - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; + unsigned long wlen, size = EFIVARS_DATA_SIZE_MAX; char name[DUMP_NAME_LEN], data_type; - int i; + efi_status_t status; int cnt; unsigned int part; - unsigned long size; u64 time; - if (efi_guidcmp(entry->var.VendorGuid, vendor)) - return 0; - - for (i = 0; i < DUMP_NAME_LEN; i++) - name[i] = entry->var.VariableName[i]; + ucs2_as_utf8(name, varname, DUMP_NAME_LEN); if (sscanf(name, "dump-type%u-%u-%d-%llu-%c", &record->type, &part, &cnt, &time, &data_type) == 5) { @@ -95,161 +99,75 @@ static int efi_pstore_read_func(struct efivar_entry *entry, } else return 0; - entry->var.DataSize = 1024; - __efivar_entry_get(entry, &entry->var.Attributes, - &entry->var.DataSize, entry->var.Data); - size = entry->var.DataSize; - memcpy(record->buf, entry->var.Data, - (size_t)min_t(unsigned long, 
EFIVARS_DATA_SIZE_MAX, size)); - - return size; -} - -/** - * efi_pstore_scan_sysfs_enter - * @pos: scanning entry - * @next: next entry - * @head: list head - */ -static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos, - struct efivar_entry *next, - struct list_head *head) -{ - pos->scanning = true; - if (&next->list != head) - next->scanning = true; -} - -/** - * __efi_pstore_scan_sysfs_exit - * @entry: deleting entry - * @turn_off_scanning: Check if a scanning flag should be turned off - */ -static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry, - bool turn_off_scanning) -{ - if (entry->deleting) { - list_del(&entry->list); - efivar_entry_iter_end(); - kfree(entry); - if (efivar_entry_iter_begin()) - return -EINTR; - } else if (turn_off_scanning) - entry->scanning = false; - - return 0; -} - -/** - * efi_pstore_scan_sysfs_exit - * @pos: scanning entry - * @next: next entry - * @head: list head - * @stop: a flag checking if scanning will stop - */ -static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, - struct efivar_entry *next, - struct list_head *head, bool stop) -{ - int ret = __efi_pstore_scan_sysfs_exit(pos, true); - - if (ret) - return ret; - - if (stop) - ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head); - return ret; -} + record->buf = kmalloc(size, GFP_KERNEL); + if (!record->buf) + return -ENOMEM; -/** - * efi_pstore_sysfs_entry_iter - * - * @record: pstore record to pass to callback - * - * You MUST call efivar_entry_iter_begin() before this function, and - * efivar_entry_iter_end() afterwards. - * - */ -static int efi_pstore_sysfs_entry_iter(struct pstore_record *record) -{ - struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data; - struct efivar_entry *entry, *n; - struct list_head *head = &efi_pstore_list; - int size = 0; - int ret; - - if (!*pos) { - list_for_each_entry_safe(entry, n, head, list) { - efi_pstore_scan_sysfs_enter(entry, n, head); - - size = efi_pstore_read_func(entry, record); - ret = efi_pstore_scan_sysfs_exit(entry, n, head, - size < 0); - if (ret) - return ret; - if (size) - break; - } - *pos = n; - return size; + status = efivar_get_variable(varname, &LINUX_EFI_CRASH_GUID, NULL, + &size, record->buf); + if (status != EFI_SUCCESS) { + kfree(record->buf); + return -EIO; } - list_for_each_entry_safe_from((*pos), n, head, list) { - efi_pstore_scan_sysfs_enter((*pos), n, head); - - size = efi_pstore_read_func((*pos), record); - ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); - if (ret) - return ret; - if (size) - break; + /* + * Store the name of the variable in the pstore_record priv field, so + * we can reuse it later if we need to delete the EFI variable from the + * variable store. + */ + wlen = (ucs2_strnlen(varname, DUMP_NAME_LEN) + 1) * sizeof(efi_char16_t); + record->priv = kmemdup(varname, wlen, GFP_KERNEL); + if (!record->priv) { + kfree(record->buf); + return -ENOMEM; } - *pos = n; + return size; } -/** - * efi_pstore_read - * - * This function returns a size of NVRAM entry logged via efi_pstore_write(). - * The meaning and behavior of efi_pstore/pstore are as below. - * - * size > 0: Got data of an entry logged via efi_pstore_write() successfully, - * and pstore filesystem will continue reading subsequent entries. - * size == 0: Entry was not logged via efi_pstore_write(), - * and efi_pstore driver will continue reading subsequent entries. - * size < 0: Failed to get data of entry logging via efi_pstore_write(), - * and pstore will stop reading entry. 
- */ static ssize_t efi_pstore_read(struct pstore_record *record) { - ssize_t size; + efi_char16_t *varname = record->psi->data; + efi_guid_t guid = LINUX_EFI_CRASH_GUID; + unsigned long varname_size; + efi_status_t status; - record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); - if (!record->buf) - return -ENOMEM; + for (;;) { + varname_size = EFIVARS_DATA_SIZE_MAX; - if (efivar_entry_iter_begin()) { - size = -EINTR; - goto out; - } - size = efi_pstore_sysfs_entry_iter(record); - efivar_entry_iter_end(); + /* + * If this is the first read() call in the pstore enumeration, + * varname will be the empty string, and the GetNextVariable() + * runtime service call will return the first EFI variable in + * its own enumeration order, ignoring the guid argument. + * + * Subsequent calls to GetNextVariable() must pass the name and + * guid values returned by the previous call, which is why we + * store varname in record->psi->data. Given that we only + * enumerate variables with the efi-pstore GUID, there is no + * need to record the guid return value. + */ + status = efivar_get_next_variable(&varname_size, varname, &guid); + if (status == EFI_NOT_FOUND) + return 0; -out: - if (size <= 0) { - kfree(record->buf); - record->buf = NULL; + if (status != EFI_SUCCESS) + return -EIO; + + /* skip variables that don't concern us */ + if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) + continue; + + return efi_pstore_read_func(record, varname); } - return size; } static int efi_pstore_write(struct pstore_record *record) { char name[DUMP_NAME_LEN]; efi_char16_t efi_name[DUMP_NAME_LEN]; - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; - int i, ret = 0; + efi_status_t status; + int i; record->id = generic_id(record->time.tv_sec, record->part, record->count); @@ -265,88 +183,26 @@ static int efi_pstore_write(struct pstore_record *record) for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; - ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - preemptible(), record->size, record->psi->buf); - - if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) - if (!schedule_work(&efivar_work)) - module_put(THIS_MODULE); - - return ret; + if (efivar_trylock()) + return -EBUSY; + status = efivar_set_variable_locked(efi_name, &LINUX_EFI_CRASH_GUID, + PSTORE_EFI_ATTRIBUTES, + record->size, record->psi->buf, + true); + efivar_unlock(); + return status == EFI_SUCCESS ? 0 : -EIO; }; -/* - * Clean up an entry with the same name - */ -static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) -{ - efi_char16_t *efi_name = data; - efi_guid_t vendor = LINUX_EFI_CRASH_GUID; - unsigned long ucs2_len = ucs2_strlen(efi_name); - - if (efi_guidcmp(entry->var.VendorGuid, vendor)) - return 0; - - if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len)) - return 0; - - if (entry->scanning) { - /* - * Skip deletion because this entry will be deleted - * after scanning is completed. 
- */ - entry->deleting = true; - } else - list_del(&entry->list); - - /* found */ - __efivar_entry_delete(entry); - - return 1; -} - -static int efi_pstore_erase_name(const char *name) -{ - struct efivar_entry *entry = NULL; - efi_char16_t efi_name[DUMP_NAME_LEN]; - int found, i; - - for (i = 0; i < DUMP_NAME_LEN; i++) { - efi_name[i] = name[i]; - if (name[i] == '\0') - break; - } - - if (efivar_entry_iter_begin()) - return -EINTR; - - found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list, - efi_name, &entry); - efivar_entry_iter_end(); - - if (found && !entry->scanning) - kfree(entry); - - return found ? 0 : -ENOENT; -} - static int efi_pstore_erase(struct pstore_record *record) { - char name[DUMP_NAME_LEN]; - int ret; - - snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld", - record->type, record->part, record->count, - (long long)record->time.tv_sec); - ret = efi_pstore_erase_name(name); - if (ret != -ENOENT) - return ret; + efi_status_t status; - snprintf(name, sizeof(name), "dump-type%u-%u-%lld", - record->type, record->part, (long long)record->time.tv_sec); - ret = efi_pstore_erase_name(name); + status = efivar_set_variable(record->priv, &LINUX_EFI_CRASH_GUID, + PSTORE_EFI_ATTRIBUTES, 0, NULL); - return ret; + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) + return -EIO; + return 0; } static struct pstore_info efi_pstore_info = { @@ -360,77 +216,14 @@ static struct pstore_info efi_pstore_info = { .erase = efi_pstore_erase, }; -static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - int ret; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - memcpy(entry->var.VariableName, name, name_size); - entry->var.VendorGuid = vendor; - - ret = efivar_entry_add(entry, &efi_pstore_list); - if (ret) - kfree(entry); - - return ret; -} - -static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry = data; - - if (efivar_entry_find(name, vendor, &efi_pstore_list, false)) - return 0; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - - return 1; -} - -static void efi_pstore_update_entries(struct work_struct *work) -{ - struct efivar_entry *entry; - int err; - - /* Add new sysfs entries */ - while (1) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return; - - err = efivar_init(efi_pstore_update_entry, entry, - false, &efi_pstore_list); - if (!err) - break; - - efivar_entry_add(entry, &efi_pstore_list); - } - - kfree(entry); - module_put(THIS_MODULE); -} - static __init int efivars_pstore_init(void) { - int ret; - - if (!efivars_kobject() || !efivar_supports_writes()) + if (!efivar_supports_writes()) return 0; if (efivars_pstore_disable) return 0; - ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list); - if (ret) - return ret; - efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); if (!efi_pstore_info.buf) return -ENOMEM; @@ -443,8 +236,6 @@ static __init int efivars_pstore_init(void) efi_pstore_info.bufsize = 0; } - INIT_WORK(&efivar_work, efi_pstore_update_entries); - return 0; } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index ae79c3300129..a46df5d1d094 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -21,6 +21,7 @@ #include <linux/device.h> #include <linux/efi.h> #include <linux/of.h> +#include <linux/initrd.h> #include 
<linux/io.h> #include <linux/kexec.h> #include <linux/platform_device.h> @@ -46,15 +47,19 @@ struct efi __read_mostly efi = { #ifdef CONFIG_LOAD_UEFI_KEYS .mokvar_table = EFI_INVALID_TABLE_ADDR, #endif +#ifdef CONFIG_EFI_COCO_SECRET + .coco_secret = EFI_INVALID_TABLE_ADDR, +#endif }; EXPORT_SYMBOL(efi); unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR; +static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR; struct mm_struct efi_mm = { - .mm_rb = RB_ROOT, + .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock), .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq), @@ -66,7 +71,7 @@ struct mm_struct efi_mm = { struct workqueue_struct *efi_rts_wq; -static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT); +static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME); static int __init setup_noefi(char *arg) { disable_runtime = true; @@ -199,7 +204,7 @@ static void generic_ops_unregister(void) } #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS -#define EFIVAR_SSDT_NAME_MAX 16 +#define EFIVAR_SSDT_NAME_MAX 16UL static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) { @@ -212,87 +217,68 @@ static int __init efivar_ssdt_setup(char *str) memcpy(efivar_ssdt, str, strlen(str)); else pr_warn("efivar_ssdt: name too long: %s\n", str); - return 0; + return 1; } __setup("efivar_ssdt=", efivar_ssdt_setup); -static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - struct list_head *list = data; - char utf8_name[EFIVAR_SSDT_NAME_MAX]; - int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size); - - ucs2_as_utf8(utf8_name, name, limit - 1); - if (strncmp(utf8_name, efivar_ssdt, limit) != 0) - return 0; - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t)); - - efivar_entry_add(entry, list); - - return 0; -} - static __init int efivar_ssdt_load(void) { - LIST_HEAD(entries); - struct efivar_entry *entry, *aux; - unsigned long size; - void *data; - int ret; + unsigned long name_size = 256; + efi_char16_t *name = NULL; + efi_status_t status; + efi_guid_t guid; if (!efivar_ssdt[0]) return 0; - ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); - - list_for_each_entry_safe(entry, aux, &entries, list) { - pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, - &entry->var.VendorGuid); + name = kzalloc(name_size, GFP_KERNEL); + if (!name) + return -ENOMEM; - list_del(&entry->list); + for (;;) { + char utf8_name[EFIVAR_SSDT_NAME_MAX]; + unsigned long data_size = 0; + void *data; + int limit; - ret = efivar_entry_size(entry, &size); - if (ret) { - pr_err("failed to get var size\n"); - goto free_entry; + status = efi.get_next_variable(&name_size, name, &guid); + if (status == EFI_NOT_FOUND) { + break; + } else if (status == EFI_BUFFER_TOO_SMALL) { + name = krealloc(name, name_size, GFP_KERNEL); + if (!name) + return -ENOMEM; + continue; } - data = kmalloc(size, GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto free_entry; - } + limit = min(EFIVAR_SSDT_NAME_MAX, name_size); + ucs2_as_utf8(utf8_name, name, limit - 1); + if (strncmp(utf8_name, efivar_ssdt, limit) != 0) + continue; - ret = 
efivar_entry_get(entry, NULL, &size, data); - if (ret) { - pr_err("failed to get var data\n"); - goto free_data; - } + pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid); - ret = acpi_load_table(data, NULL); - if (ret) { - pr_err("failed to load table: %d\n", ret); - goto free_data; - } + status = efi.get_variable(name, &guid, NULL, &data_size, NULL); + if (status != EFI_BUFFER_TOO_SMALL || !data_size) + return -EIO; - goto free_entry; + data = kmalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; -free_data: + status = efi.get_variable(name, &guid, NULL, &data_size, data); + if (status == EFI_SUCCESS) { + acpi_status ret = acpi_load_table(data, NULL); + if (ret) + pr_err("failed to load table: %u\n", ret); + else + continue; + } else { + pr_err("failed to get var data: 0x%lx\n", status); + } kfree(data); - -free_entry: - kfree(entry); } - - return ret; + return 0; } #else static inline int efivar_ssdt_load(void) { return 0; } @@ -422,6 +408,11 @@ static int __init efisubsys_init(void) if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS)) efi_debugfs_init(); +#ifdef CONFIG_EFI_COCO_SECRET + if (efi.coco_secret != EFI_INVALID_TABLE_ADDR) + platform_device_register_simple("efi_secret", 0, NULL, 0); +#endif + return 0; err_remove_group: @@ -438,6 +429,29 @@ err_put: subsys_initcall(efisubsys_init); +void __init efi_find_mirror(void) +{ + efi_memory_desc_t *md; + u64 mirror_size = 0, total_size = 0; + + if (!efi_enabled(EFI_MEMMAP)) + return; + + for_each_efi_memory_desc(md) { + unsigned long long start = md->phys_addr; + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; + + total_size += size; + if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { + memblock_mark_mirror(start, size); + mirror_size += size; + } + } + if (mirror_size) + pr_info("Memory: %lldM/%lldM mirrored memory\n", + mirror_size>>20, total_size>>20); +} + /* * Find the efi memory descriptor for a given physical address. 
Given a * physical address, determine if it exists within an EFI Memory Map entry, @@ -522,6 +536,7 @@ static const efi_config_table_type_t common_tables[] __initconst = { {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" }, {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" }, {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" }, + {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" }, {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" }, #ifdef CONFIG_EFI_RCI2_TABLE {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys }, @@ -529,6 +544,9 @@ static const efi_config_table_type_t common_tables[] __initconst = { #ifdef CONFIG_LOAD_UEFI_KEYS {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" }, #endif +#ifdef CONFIG_EFI_COCO_SECRET + {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" }, +#endif {}, }; @@ -593,7 +611,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = READ_ONCE(seed->size); + size = min(seed->size, EFI_RANDOM_SEED_SIZE); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -661,6 +679,18 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, } } + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && + initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) { + struct linux_efi_initrd *tbl; + + tbl = early_memremap(initrd, sizeof(*tbl)); + if (tbl) { + phys_initrd_start = tbl->base; + phys_initrd_size = tbl->size; + early_memunmap(tbl, sizeof(*tbl)); + } + } + return 0; } @@ -722,6 +752,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, systab_hdr->revision >> 16, systab_hdr->revision & 0xffff, vendor); + + if (IS_ENABLED(CONFIG_X86_64) && + systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION && + !strcmp(vendor, "Apple")) { + pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n"); + efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION; + } } static __initdata char memory_type_name[][13] = { @@ -879,6 +916,7 @@ int efi_status_to_err(efi_status_t status) return err; } +EXPORT_SYMBOL_GPL(efi_status_to_err); static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock); static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init; diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 15a47539dc56..4f9fb086eab7 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -10,69 +10,54 @@ #include <linux/module.h> #include <linux/reboot.h> #include <linux/slab.h> +#include <linux/ucs2_string.h> -static void efibc_str_to_str16(const char *str, efi_char16_t *str16) -{ - size_t i; - - for (i = 0; i < strlen(str); i++) - str16[i] = str[i]; - - str16[i] = '\0'; -} +#define MAX_DATA_LEN 512 -static int efibc_set_variable(const char *name, const char *value) +static int efibc_set_variable(efi_char16_t *name, efi_char16_t *value, + unsigned long len) { - int ret; - efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID; - struct efivar_entry *entry; - size_t size = (strlen(value) + 1) * sizeof(efi_char16_t); + efi_status_t status; - if (size > sizeof(entry->var.Data)) { - pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name); - return -EINVAL; - } + status = efi.set_variable(name, &LINUX_EFI_LOADER_ENTRY_GUID, + EFI_VARIABLE_NON_VOLATILE + | EFI_VARIABLE_BOOTSERVICE_ACCESS + | EFI_VARIABLE_RUNTIME_ACCESS, + len * sizeof(efi_char16_t), value); - entry = 
kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { - pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name); - return -ENOMEM; + if (status != EFI_SUCCESS) { + pr_err("failed to set EFI variable: 0x%lx\n", status); + return -EIO; } - - efibc_str_to_str16(name, entry->var.VariableName); - efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data); - memcpy(&entry->var.VendorGuid, &guid, sizeof(guid)); - - ret = efivar_entry_set_safe(entry->var.VariableName, - entry->var.VendorGuid, - EFI_VARIABLE_NON_VOLATILE - | EFI_VARIABLE_BOOTSERVICE_ACCESS - | EFI_VARIABLE_RUNTIME_ACCESS, - false, size, entry->var.Data); - - if (ret) - pr_err("failed to set %s EFI variable: 0x%x\n", - name, ret); - - kfree(entry); - return ret; + return 0; } static int efibc_reboot_notifier_call(struct notifier_block *notifier, unsigned long event, void *data) { - const char *reason = "shutdown"; + efi_char16_t *reason = event == SYS_RESTART ? L"reboot" + : L"shutdown"; + const u8 *str = data; + efi_char16_t *wdata; + unsigned long l; int ret; - if (event == SYS_RESTART) - reason = "reboot"; - - ret = efibc_set_variable("LoaderEntryRebootReason", reason); + ret = efibc_set_variable(L"LoaderEntryRebootReason", reason, + ucs2_strlen(reason)); if (ret || !data) return NOTIFY_DONE; - efibc_set_variable("LoaderEntryOneShot", (char *)data); + wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL); + if (!wdata) + return NOTIFY_DONE; + + for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++) + wdata[l] = str[l]; + wdata[l] = L'\0'; + + efibc_set_variable(L"LoaderEntryOneShot", wdata, l); + kfree(wdata); return NOTIFY_DONE; } @@ -84,7 +69,7 @@ static int __init efibc_init(void) { int ret; - if (!efivars_kobject() || !efivar_supports_writes()) + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) return -ENODEV; ret = register_reboot_notifier(&efibc_reboot_notifier); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c deleted file mode 100644 index e6b16b3a17a8..000000000000 --- a/drivers/firmware/efi/efivars.c +++ /dev/null @@ -1,670 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Originally from efivars.c, - * - * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com> - * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com> - * - * This code takes all variables accessible from EFI runtime and - * exports them via sysfs - */ - -#include <linux/efi.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/ucs2_string.h> -#include <linux/compat.h> - -#define EFIVARS_VERSION "0.08" -#define EFIVARS_DATE "2004-May-17" - -MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>"); -MODULE_DESCRIPTION("sysfs interface to EFI Variables"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(EFIVARS_VERSION); - -static LIST_HEAD(efivar_sysfs_list); - -static struct kset *efivars_kset; - -static struct bin_attribute *efivars_new_var; -static struct bin_attribute *efivars_del_var; - -struct compat_efi_variable { - efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; - efi_guid_t VendorGuid; - __u32 DataSize; - __u8 Data[1024]; - __u32 Status; - __u32 Attributes; -} __packed; - -struct efivar_attribute { - struct attribute attr; - ssize_t (*show) (struct efivar_entry *entry, char *buf); - ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); -}; - -#define EFIVAR_ATTR(_name, _mode, _show, _store) \ -struct efivar_attribute efivar_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode}, \ - .show = 
_show, \ - .store = _store, \ -}; - -#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr) -#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj) - -/* - * Prototype for sysfs creation function - */ -static int -efivar_create_sysfs_entry(struct efivar_entry *new_var); - -static ssize_t -efivar_guid_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - char *str = buf; - - if (!entry || !buf) - return 0; - - efi_guid_to_str(&var->VendorGuid, str); - str += strlen(str); - str += sprintf(str, "\n"); - - return str - buf; -} - -static ssize_t -efivar_attr_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - char *str = buf; - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) - str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n"); - if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS) - str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS) - str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD) - str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n"); - if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) - str += sprintf(str, - "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n"); - if (var->Attributes & - EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) - str += sprintf(str, - "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n"); - if (var->Attributes & EFI_VARIABLE_APPEND_WRITE) - str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n"); - return str - buf; -} - -static ssize_t -efivar_size_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - char *str = buf; - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - str += sprintf(str, "0x%lx\n", var->DataSize); - return str - buf; -} - -static ssize_t -efivar_data_read(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - unsigned long size = sizeof(var->Data); - int ret; - - if (!entry || !buf) - return -EINVAL; - - ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data); - var->DataSize = size; - if (ret) - return -EIO; - - memcpy(buf, var->Data, var->DataSize); - return var->DataSize; -} - -static inline int -sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, - unsigned long size, u32 attributes, u8 *data) -{ - /* - * If only updating the variable data, then the name - * and guid should remain the same - */ - if (memcmp(name, var->VariableName, sizeof(var->VariableName)) || - efi_guidcmp(vendor, var->VendorGuid)) { - printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n"); - return -EINVAL; - } - - if ((size <= 0) || (attributes == 0)){ - printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n"); - return -EINVAL; - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(vendor, name, data, size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } - - return 0; -} - -static void -copy_out_compat(struct efi_variable *dst, struct compat_efi_variable 
*src) -{ - memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN); - memcpy(dst->Data, src->Data, sizeof(src->Data)); - - dst->VendorGuid = src->VendorGuid; - dst->DataSize = src->DataSize; - dst->Attributes = src->Attributes; -} - -/* - * We allow each variable to be edited via rewriting the - * entire efi variable structure. - */ -static ssize_t -efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) -{ - struct efi_variable *new_var, *var = &entry->var; - efi_char16_t *name; - unsigned long size; - efi_guid_t vendor; - u32 attributes; - u8 *data; - int err; - - if (!entry || !buf) - return -EINVAL; - - if (in_compat_syscall()) { - struct compat_efi_variable *compat; - - if (count != sizeof(*compat)) - return -EINVAL; - - compat = (struct compat_efi_variable *)buf; - attributes = compat->Attributes; - vendor = compat->VendorGuid; - name = compat->VariableName; - size = compat->DataSize; - data = compat->Data; - - err = sanity_check(var, name, vendor, size, attributes, data); - if (err) - return err; - - copy_out_compat(&entry->var, compat); - } else { - if (count != sizeof(struct efi_variable)) - return -EINVAL; - - new_var = (struct efi_variable *)buf; - - attributes = new_var->Attributes; - vendor = new_var->VendorGuid; - name = new_var->VariableName; - size = new_var->DataSize; - data = new_var->Data; - - err = sanity_check(var, name, vendor, size, attributes, data); - if (err) - return err; - - memcpy(&entry->var, new_var, count); - } - - err = efivar_entry_set(entry, attributes, size, data, NULL); - if (err) { - printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err); - return -EIO; - } - - return count; -} - -static ssize_t -efivar_show_raw(struct efivar_entry *entry, char *buf) -{ - struct efi_variable *var = &entry->var; - struct compat_efi_variable *compat; - unsigned long datasize = sizeof(var->Data); - size_t size; - int ret; - - if (!entry || !buf) - return 0; - - ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data); - var->DataSize = datasize; - if (ret) - return -EIO; - - if (in_compat_syscall()) { - compat = (struct compat_efi_variable *)buf; - - size = sizeof(*compat); - memcpy(compat->VariableName, var->VariableName, - EFI_VAR_NAME_LEN); - memcpy(compat->Data, var->Data, sizeof(compat->Data)); - - compat->VendorGuid = var->VendorGuid; - compat->DataSize = var->DataSize; - compat->Attributes = var->Attributes; - } else { - size = sizeof(*var); - memcpy(buf, var, size); - } - - return size; -} - -/* - * Generic read/write functions that call the specific functions of - * the attributes... 
- */ -static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - struct efivar_attribute *efivar_attr = to_efivar_attr(attr); - ssize_t ret = -EIO; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (efivar_attr->show) { - ret = efivar_attr->show(var, buf); - } - return ret; -} - -static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - struct efivar_attribute *efivar_attr = to_efivar_attr(attr); - ssize_t ret = -EIO; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (efivar_attr->store) - ret = efivar_attr->store(var, buf, count); - - return ret; -} - -static const struct sysfs_ops efivar_attr_ops = { - .show = efivar_attr_show, - .store = efivar_attr_store, -}; - -static void efivar_release(struct kobject *kobj) -{ - struct efivar_entry *var = to_efivar_entry(kobj); - kfree(var); -} - -static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL); -static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL); -static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL); -static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL); -static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw); - -static struct attribute *def_attrs[] = { - &efivar_attr_guid.attr, - &efivar_attr_size.attr, - &efivar_attr_attributes.attr, - &efivar_attr_data.attr, - &efivar_attr_raw_var.attr, - NULL, -}; - -static struct kobj_type efivar_ktype = { - .release = efivar_release, - .sysfs_ops = &efivar_attr_ops, - .default_attrs = def_attrs, -}; - -static ssize_t efivar_create(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) -{ - struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; - struct efi_variable *new_var = (struct efi_variable *)buf; - struct efivar_entry *new_entry; - bool need_compat = in_compat_syscall(); - efi_char16_t *name; - unsigned long size; - u32 attributes; - u8 *data; - int err; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (need_compat) { - if (count != sizeof(*compat)) - return -EINVAL; - - attributes = compat->Attributes; - name = compat->VariableName; - size = compat->DataSize; - data = compat->Data; - } else { - if (count != sizeof(*new_var)) - return -EINVAL; - - attributes = new_var->Attributes; - name = new_var->VariableName; - size = new_var->DataSize; - data = new_var->Data; - } - - if ((attributes & ~EFI_VARIABLE_MASK) != 0 || - efivar_validate(new_var->VendorGuid, name, data, - size) == false) { - printk(KERN_ERR "efivars: Malformed variable content\n"); - return -EINVAL; - } - - new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - - if (need_compat) - copy_out_compat(&new_entry->var, compat); - else - memcpy(&new_entry->var, new_var, sizeof(*new_var)); - - err = efivar_entry_set(new_entry, attributes, size, - data, &efivar_sysfs_list); - if (err) { - if (err == -EEXIST) - err = -EINVAL; - goto out; - } - - if (efivar_create_sysfs_entry(new_entry)) { - printk(KERN_WARNING "efivars: failed to create sysfs entry.\n"); - kfree(new_entry); - } - return count; - -out: - kfree(new_entry); - return err; -} - -static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) -{ - struct efi_variable *del_var = (struct efi_variable *)buf; - struct compat_efi_variable *compat; - 
struct efivar_entry *entry; - efi_char16_t *name; - efi_guid_t vendor; - int err = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (in_compat_syscall()) { - if (count != sizeof(*compat)) - return -EINVAL; - - compat = (struct compat_efi_variable *)buf; - name = compat->VariableName; - vendor = compat->VendorGuid; - } else { - if (count != sizeof(*del_var)) - return -EINVAL; - - name = del_var->VariableName; - vendor = del_var->VendorGuid; - } - - if (efivar_entry_iter_begin()) - return -EINTR; - entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true); - if (!entry) - err = -EINVAL; - else if (__efivar_entry_delete(entry)) - err = -EIO; - - if (err) { - efivar_entry_iter_end(); - return err; - } - - if (!entry->scanning) { - efivar_entry_iter_end(); - efivar_unregister(entry); - } else - efivar_entry_iter_end(); - - /* It's dead Jim.... */ - return count; -} - -/** - * efivar_create_sysfs_entry - create a new entry in sysfs - * @new_var: efivar entry to create - * - * Returns 0 on success, negative error code on failure - */ -static int -efivar_create_sysfs_entry(struct efivar_entry *new_var) -{ - int short_name_size; - char *short_name; - unsigned long utf8_name_size; - efi_char16_t *variable_name = new_var->var.VariableName; - int ret; - - /* - * Length of the variable bytes in UTF8, plus the '-' separator, - * plus the GUID, plus trailing NUL - */ - utf8_name_size = ucs2_utf8size(variable_name); - short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1; - - short_name = kmalloc(short_name_size, GFP_KERNEL); - if (!short_name) - return -ENOMEM; - - ucs2_as_utf8(short_name, variable_name, short_name_size); - - /* This is ugly, but necessary to separate one vendor's - private variables from another's. */ - short_name[utf8_name_size] = '-'; - efi_guid_to_str(&new_var->var.VendorGuid, - short_name + utf8_name_size + 1); - - new_var->kobj.kset = efivars_kset; - - ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, - NULL, "%s", short_name); - kfree(short_name); - if (ret) { - kobject_put(&new_var->kobj); - return ret; - } - - kobject_uevent(&new_var->kobj, KOBJ_ADD); - if (efivar_entry_add(new_var, &efivar_sysfs_list)) { - efivar_unregister(new_var); - return -EINTR; - } - - return 0; -} - -static int -create_efivars_bin_attributes(void) -{ - struct bin_attribute *attr; - int error; - - /* new_var */ - attr = kzalloc(sizeof(*attr), GFP_KERNEL); - if (!attr) - return -ENOMEM; - - attr->attr.name = "new_var"; - attr->attr.mode = 0200; - attr->write = efivar_create; - efivars_new_var = attr; - - /* del_var */ - attr = kzalloc(sizeof(*attr), GFP_KERNEL); - if (!attr) { - error = -ENOMEM; - goto out_free; - } - attr->attr.name = "del_var"; - attr->attr.mode = 0200; - attr->write = efivar_delete; - efivars_del_var = attr; - - sysfs_bin_attr_init(efivars_new_var); - sysfs_bin_attr_init(efivars_del_var); - - /* Register */ - error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var); - if (error) { - printk(KERN_ERR "efivars: unable to create new_var sysfs file" - " due to error %d\n", error); - goto out_free; - } - - error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var); - if (error) { - printk(KERN_ERR "efivars: unable to create del_var sysfs file" - " due to error %d\n", error); - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var); - goto out_free; - } - - return 0; -out_free: - kfree(efivars_del_var); - efivars_del_var = NULL; - kfree(efivars_new_var); - efivars_new_var = NULL; - return error; -} - -static int 
efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor, - unsigned long name_size, void *data) -{ - struct efivar_entry *entry; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - memcpy(entry->var.VariableName, name, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - - efivar_create_sysfs_entry(entry); - - return 0; -} - -static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data) -{ - int err = efivar_entry_remove(entry); - - if (err) - return err; - efivar_unregister(entry); - return 0; -} - -static void efivars_sysfs_exit(void) -{ - /* Remove all entries and destroy */ - int err; - - err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list, - NULL, NULL); - if (err) { - pr_err("efivars: Failed to destroy sysfs entries\n"); - return; - } - - if (efivars_new_var) - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var); - if (efivars_del_var) - sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var); - kfree(efivars_new_var); - kfree(efivars_del_var); - kset_unregister(efivars_kset); -} - -static int efivars_sysfs_init(void) -{ - struct kobject *parent_kobj = efivars_kobject(); - int error = 0; - - /* No efivars has been registered yet */ - if (!parent_kobj || !efivar_supports_writes()) - return 0; - - printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, - EFIVARS_DATE); - - efivars_kset = kset_create_and_add("vars", NULL, parent_kobj); - if (!efivars_kset) { - printk(KERN_ERR "efivars: Subsystem registration failed.\n"); - return -ENOMEM; - } - - efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list); - - error = create_efivars_bin_attributes(); - if (error) { - efivars_sysfs_exit(); - return error; - } - - return 0; -} - -module_init(efivars_sysfs_init); -module_exit(efivars_sysfs_exit); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index d5915272141f..2a2f52b017e7 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -146,6 +146,8 @@ static struct attribute *esre1_attrs[] = { &esre_last_attempt_status.attr, NULL }; +ATTRIBUTE_GROUPS(esre1); + static void esre_release(struct kobject *kobj) { struct esre_entry *entry = to_entry(kobj); @@ -157,7 +159,7 @@ static void esre_release(struct kobject *kobj) static struct kobj_type esre1_ktype = { .release = esre_release, .sysfs_ops = &esre_attr_ops, - .default_attrs = esre1_attrs, + .default_groups = esre1_groups, }; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d0537573501e..ef5045a53ce0 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -26,8 +26,10 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ $(call cc-option,-mno-single-pic-base) cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ -fpic +cflags-$(CONFIG_LOONGARCH) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \ + -fpie -cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt +cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ -include $(srctree)/include/linux/hidden.h \ @@ -37,8 +39,17 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ $(call cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS +# +# struct randomization only makes sense for Linux internal types, which the EFI +# stub code never touches, so let's turn off struct randomization for the stub +# 
altogether +# +KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) + # remove SCS flags from all objects in this directory KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) +# disable CFI +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) # disable LTO KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS)) @@ -46,6 +57,7 @@ GCOV_PROFILE := n # Sanitizer runtimes are unavailable and cannot be linked here. KASAN_SANITIZE := n KCSAN_SANITIZE := n +KMSAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y @@ -57,21 +69,32 @@ lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ skip_spaces.o lib-cmdline.o lib-ctype.o \ alignedmem.o relocate.o vsprintf.o -# include the stub's generic dependencies from lib/ when building for ARM/arm64 -efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c +# include the stub's libfdt dependencies from lib/ when needed +libfdt-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c \ + fdt_empty_tree.c fdt_sw.c + +lib-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdt.o \ + $(patsubst %.c,lib-%.o,$(libfdt-deps)) $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE $(call if_changed_rule,cc_o_c) -lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \ - $(patsubst %.c,lib-%.o,$(efi-deps-y)) +lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o lib-$(CONFIG_ARM) += arm32-stub.o -lib-$(CONFIG_ARM64) += arm64-stub.o +lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o lib-$(CONFIG_RISCV) += riscv-stub.o +lib-$(CONFIG_LOONGARCH) += loongarch-stub.o + CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) +zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o +lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y) + +extra-y := $(lib-y) +lib-y := $(patsubst %.o,%.stub.o,$(lib-y)) + # Even when -mbranch-protection=none is set, Clang will generate a # .note.gnu.property for code-less object files (like lib/ctype.c), # so work around this by explicitly removing the unwanted section. @@ -111,9 +134,6 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS # a verification pass to see if any absolute relocations exist in any of the # object files. # -extra-y := $(lib-y) -lib-y := $(patsubst %.o,%.stub.o,$(lib-y)) - STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS @@ -125,6 +145,12 @@ STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20 +# For LoongArch, keep all the symbols in .init section and make sure that no +# absolute symbols references exist. 
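+# (The stub executes from whatever address the firmware loaded it at,
+# before the kernel proper has been relocated, so it must remain fully
+# position independent; an absolute relocation such as R_LARCH_MARK_LA
+# would bake a link-time address into the image, which is what the
+# verification pass described above is there to catch.)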
+STUBCOPY_FLAGS-$(CONFIG_LOONGARCH) += --prefix-alloc-sections=.init \ + --prefix-symbols=__efistub_ +STUBCOPY_RELOC-$(CONFIG_LOONGARCH) := R_LARCH_MARK_LA + $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,stubcopy) diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot new file mode 100644 index 000000000000..3340b385a05b --- /dev/null +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 + +# to be include'd by arch/$(ARCH)/boot/Makefile after setting +# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET and EFI_ZBOOT_MACH_TYPE + +comp-type-$(CONFIG_KERNEL_GZIP) := gzip +comp-type-$(CONFIG_KERNEL_LZ4) := lz4 +comp-type-$(CONFIG_KERNEL_LZMA) := lzma +comp-type-$(CONFIG_KERNEL_LZO) := lzo +comp-type-$(CONFIG_KERNEL_XZ) := xzkern +comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22 + +# in GZIP, the appended le32 carrying the uncompressed size is part of the +# format, but in other cases, we just append it at the end for convenience, +# causing the original tools to complain when checking image integrity. +# So disregard it when calculating the payload size in the zimage header. +zboot-method-y := $(comp-type-y)_with_size +zboot-size-len-y := 4 + +zboot-method-$(CONFIG_KERNEL_GZIP) := gzip +zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 + +$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE + $(call if_changed,$(zboot-method-y)) + +OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \ + --rename-section .data=.gzdata,load,alloc,readonly,contents +$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE + $(call if_changed,objcopy) + +AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \ + -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \ + -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \ + -DCOMP_TYPE="\"$(comp-type-y)\"" + +$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE + $(call if_changed_rule,as_o_S) + +ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a + +LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds +$(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE + $(call if_changed,ld) + +OBJCOPYFLAGS_vmlinuz.efi := -O binary +$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE + $(call if_changed,objcopy) + +targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 4b5b2403b3a0..0131e3aaa605 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -117,7 +117,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, - efi_loaded_image_t *image) + efi_loaded_image_t *image, + efi_handle_t image_handle) { const int slack = TEXT_OFFSET - 5 * PAGE_SIZE; int alloc_size = MAX_UNCOMP_KERNEL_SIZE + EFI_PHYS_ALIGN; diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 2363fee9211c..f9de5217ea65 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -15,16 +15,39 @@ #include "efistub.h" +static bool system_needs_vamap(void) +{ + const u8 *type1_family = efi_get_smbios_string(1, family); + + /* + * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap() + * has not been called prior. 
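+	 * The match is made on the SMBIOS Type 1 (System Information)
+	 * "Family" string, which efi_get_smbios_string() reads from the
+	 * firmware's SMBIOS tables, so the workaround takes effect
+	 * automatically on affected machines without any command line option.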
+ */ + if (!type1_family || strcmp(type1_family, "Altra")) + return false; + + efi_warn("Working around broken SetVirtualAddressMap()\n"); + return true; +} + efi_status_t check_platform_features(void) { u64 tg; + /* + * If we have 48 bits of VA space for TTBR0 mappings, we can map the + * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is + * unnecessary. + */ + if (VA_BITS_MIN >= 48 && !system_needs_vamap()) + efi_novamap = true; + /* UEFI mandates support for 4 KB granularity, no need to check */ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES)) return EFI_SUCCESS; - tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { + tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf; + if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else @@ -42,26 +65,17 @@ efi_status_t check_platform_features(void) */ static bool check_image_region(u64 base, u64 size) { - unsigned long map_size, desc_size, buff_size; - efi_memory_desc_t *memory_map; - struct efi_boot_memmap map; + struct efi_boot_memmap *map; efi_status_t status; bool ret = false; int map_offset; - map.map = &memory_map; - map.map_size = &map_size; - map.desc_size = &desc_size; - map.desc_ver = NULL; - map.key_ptr = NULL; - map.buff_size = &buff_size; - - status = efi_get_memory_map(&map); + status = efi_get_memory_map(&map, false); if (status != EFI_SUCCESS) return false; - for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { - efi_memory_desc_t *md = (void *)memory_map + map_offset; + for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) { + efi_memory_desc_t *md = (void *)map->map + map_offset; u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE; /* @@ -74,7 +88,7 @@ static bool check_image_region(u64 base, u64 size) } } - efi_bs_call(free_pool, memory_map); + efi_bs_call(free_pool, map); return ret; } @@ -83,7 +97,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, - efi_loaded_image_t *image) + efi_loaded_image_t *image, + efi_handle_t image_handle) { efi_status_t status; unsigned long kernel_size, kernel_memsize = 0; @@ -100,7 +115,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, u64 min_kimg_align = efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { - if (!efi_nokaslr) { + efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID; + void *p; + + if (efi_nokaslr) { + efi_info("KASLR disabled on kernel command line\n"); + } else if (efi_bs_call(handle_protocol, image_handle, + &li_fixed_proto, &p) == EFI_SUCCESS) { + efi_info("Image placement fixed by loader\n"); + } else { status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); if (status == EFI_NOT_FOUND) { @@ -111,17 +134,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, status); efi_nokaslr = true; } - } else { - efi_info("KASLR disabled on kernel command line\n"); } } if (image->image_base != _text) efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); - if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) - efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", - EFI_KIMG_ALIGN >> 10); + if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN)) + efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n", + SEGMENT_ALIGN >> 10); kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index d489bdc645fe..0c493521b25b 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -20,10 +20,10 @@ bool efi_nochunk; bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE); -bool efi_noinitrd; int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; bool efi_novamap; +static bool efi_noinitrd; static bool efi_nosoftreserve; static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); @@ -218,7 +218,7 @@ efi_status_t efi_parse_options(char const *cmdline) efi_noinitrd = true; } else if (!strcmp(param, "efi") && val) { efi_nochunk = parse_option_str(val, "nochunk"); - efi_novamap = parse_option_str(val, "novamap"); + efi_novamap |= parse_option_str(val, "novamap"); efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && parse_option_str(val, "nosoftreserve"); @@ -310,7 +310,7 @@ bool efi_load_option_unpack(efi_load_option_unpacked_t *dest, * * Detect this case and extract OptionalData. 
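 * For reference, an EFI_LOAD_OPTION starts with a UINT32 Attributes
 * field and a UINT16 FilePathListLength, followed by a NUL-terminated
 * CHAR16 Description, the FilePathList array and finally the
 * OptionalData, which is the only part the kernel is interested in here.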
*/ -void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size) +void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size) { const efi_load_option_t *load_option = *load_options; efi_load_option_unpacked_t load_option_unpacked; @@ -334,6 +334,85 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si *load_options_size = load_option_unpacked.optional_data_size; } +enum efistub_event { + EFISTUB_EVT_INITRD, + EFISTUB_EVT_LOAD_OPTIONS, + EFISTUB_EVT_COUNT, +}; + +#define STR_WITH_SIZE(s) sizeof(s), s + +static const struct { + u32 pcr_index; + u32 event_id; + u32 event_data_len; + u8 event_data[52]; +} events[] = { + [EFISTUB_EVT_INITRD] = { + 9, + INITRD_EVENT_TAG_ID, + STR_WITH_SIZE("Linux initrd") + }, + [EFISTUB_EVT_LOAD_OPTIONS] = { + 9, + LOAD_OPTIONS_EVENT_TAG_ID, + STR_WITH_SIZE("LOADED_IMAGE::LoadOptions") + }, +}; + +static efi_status_t efi_measure_tagged_event(unsigned long load_addr, + unsigned long load_size, + enum efistub_event event) +{ + efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; + efi_tcg2_protocol_t *tcg2 = NULL; + efi_status_t status; + + efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2); + if (tcg2) { + struct efi_measured_event { + efi_tcg2_event_t event_data; + efi_tcg2_tagged_event_t tagged_event; + u8 tagged_event_data[]; + } *evt; + int size = sizeof(*evt) + events[event].event_data_len; + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&evt); + if (status != EFI_SUCCESS) + goto fail; + + evt->event_data = (struct efi_tcg2_event){ + .event_size = size, + .event_header.header_size = sizeof(evt->event_data.event_header), + .event_header.header_version = EFI_TCG2_EVENT_HEADER_VERSION, + .event_header.pcr_index = events[event].pcr_index, + .event_header.event_type = EV_EVENT_TAG, + }; + + evt->tagged_event = (struct efi_tcg2_tagged_event){ + .tagged_event_id = events[event].event_id, + .tagged_event_data_size = events[event].event_data_len, + }; + + memcpy(evt->tagged_event_data, events[event].event_data, + events[event].event_data_len); + + status = efi_call_proto(tcg2, hash_log_extend_event, 0, + load_addr, load_size, &evt->event_data); + efi_bs_call(free_pool, evt); + + if (status != EFI_SUCCESS) + goto fail; + return EFI_SUCCESS; + } + + return EFI_UNSUPPORTED; +fail: + efi_warn("Failed to measure data for event %d: 0x%lx\n", event, status); + return status; +} + /* * Convert the unicode UEFI command line to ASCII to pass to kernel. * Size of memory allocated return in *cmd_line_len. 
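 * Before any conversion takes place, the raw LoadOptions buffer is now
 * also measured into the TPM (the EFISTUB_EVT_LOAD_OPTIONS event above),
 * so the TCG log records the command line exactly as the firmware handed
 * it over, prior to the load options quirk being applied.
 * The conversion itself is UTF-16 to UTF-8: one output byte per code
 * point below 0x80, two below 0x800, three for the rest of the basic
 * multilingual plane, and four for characters encoded as surrogate
 * pairs, which is how options_bytes is accounted below.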
@@ -341,21 +420,26 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si */ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) { - const u16 *s2; - unsigned long cmdline_addr = 0; - int options_chars = efi_table_attr(image, load_options_size); - const u16 *options = efi_table_attr(image, load_options); + const efi_char16_t *options = efi_table_attr(image, load_options); + u32 options_size = efi_table_attr(image, load_options_size); int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */ + unsigned long cmdline_addr = 0; + const efi_char16_t *s2; bool in_quote = false; efi_status_t status; + u32 options_chars; - efi_apply_loadoptions_quirk((const void **)&options, &options_chars); - options_chars /= sizeof(*options); + if (options_size > 0) + efi_measure_tagged_event((unsigned long)options, options_size, + EFISTUB_EVT_LOAD_OPTIONS); + + efi_apply_loadoptions_quirk((const void **)&options, &options_size); + options_chars = options_size / sizeof(efi_char16_t); if (options) { s2 = options; while (options_bytes < COMMAND_LINE_SIZE && options_chars--) { - u16 c = *s2++; + efi_char16_t c = *s2++; if (c < 0x80) { if (c == L'\0' || c == L'\n') @@ -419,7 +503,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) /** * efi_exit_boot_services() - Exit boot services * @handle: handle of the exiting image - * @map: pointer to receive the memory map * @priv: argument to be passed to @priv_func * @priv_func: function to process the memory map before exiting boot services * @@ -432,26 +515,26 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) * * Return: status code */ -efi_status_t efi_exit_boot_services(void *handle, - struct efi_boot_memmap *map, - void *priv, +efi_status_t efi_exit_boot_services(void *handle, void *priv, efi_exit_boot_map_processing priv_func) { + struct efi_boot_memmap *map; efi_status_t status; - status = efi_get_memory_map(map); - + status = efi_get_memory_map(&map, true); if (status != EFI_SUCCESS) - goto fail; + return status; status = priv_func(map, priv); - if (status != EFI_SUCCESS) - goto free_map; + if (status != EFI_SUCCESS) { + efi_bs_call(free_pool, map); + return status; + } if (efi_disable_pci_dma) efi_pci_disable_bridge_busmaster(); - status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); + status = efi_bs_call(exit_boot_services, handle, map->map_key); if (status == EFI_INVALID_PARAMETER) { /* @@ -467,35 +550,26 @@ efi_status_t efi_exit_boot_services(void *handle, * buffer should account for any changes in the map so the call * to get_memory_map() is expected to succeed here. 
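The byte-counting loop that begins here sizes the UTF-8 output before converting the command line. The width rule for a single UCS-2 code point, as a standalone sketch (the real routine additionally handles surrogate pairs and quoting):

    #include <stddef.h>
    #include <stdint.h>

    /* UTF-8 bytes needed to encode one BMP code point */
    static size_t utf8_width(uint16_t c)
    {
            if (c < 0x80)
                    return 1;       /* ASCII */
            if (c < 0x800)
                    return 2;       /* U+0080..U+07FF */
            return 3;               /* remainder of the BMP */
    }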
*/ - *map->map_size = *map->buff_size; + map->map_size = map->buff_size; status = efi_bs_call(get_memory_map, - map->map_size, - *map->map, - map->key_ptr, - map->desc_size, - map->desc_ver); + &map->map_size, + &map->map, + &map->map_key, + &map->desc_size, + &map->desc_ver); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) - goto fail; + return status; status = priv_func(map, priv); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) - goto fail; + return status; - status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); + status = efi_bs_call(exit_boot_services, handle, map->map_key); } - /* exit_boot_services() was called, thus cannot free */ - if (status != EFI_SUCCESS) - goto fail; - - return EFI_SUCCESS; - -free_map: - efi_bs_call(free_pool, *map->map); -fail: return status; } @@ -560,20 +634,16 @@ static const struct { * * %EFI_SUCCESS if the initrd was loaded successfully, in which * case @load_addr and @load_size are assigned accordingly * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path - * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL * * %EFI_OUT_OF_RESOURCES if memory allocation failed * * %EFI_LOAD_ERROR in all other cases */ static -efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, - unsigned long *load_size, +efi_status_t efi_load_initrd_dev_path(struct linux_efi_initrd *initrd, unsigned long max) { efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID; efi_device_path_protocol_t *dp; efi_load_file2_protocol_t *lf2; - unsigned long initrd_addr; - unsigned long initrd_size; efi_handle_t handle; efi_status_t status; @@ -587,75 +657,98 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, if (status != EFI_SUCCESS) return status; - status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL); + initrd->size = 0; + status = efi_call_proto(lf2, load_file, dp, false, &initrd->size, NULL); if (status != EFI_BUFFER_TOO_SMALL) return EFI_LOAD_ERROR; - status = efi_allocate_pages(initrd_size, &initrd_addr, max); + status = efi_allocate_pages(initrd->size, &initrd->base, max); if (status != EFI_SUCCESS) return status; - status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, - (void *)initrd_addr); + status = efi_call_proto(lf2, load_file, dp, false, &initrd->size, + (void *)initrd->base); if (status != EFI_SUCCESS) { - efi_free(initrd_size, initrd_addr); + efi_free(initrd->size, initrd->base); return EFI_LOAD_ERROR; } - - *load_addr = initrd_addr; - *load_size = initrd_size; return EFI_SUCCESS; } static efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, - unsigned long *load_addr, - unsigned long *load_size, + struct linux_efi_initrd *initrd, unsigned long soft_limit, unsigned long hard_limit) { if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) || - (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) { - *load_addr = *load_size = 0; - return EFI_SUCCESS; - } + (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) + return EFI_UNSUPPORTED; return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2, soft_limit, hard_limit, - load_addr, load_size); + &initrd->base, &initrd->size); } /** * efi_load_initrd() - Load initial RAM disk * @image: EFI loaded image protocol - * @load_addr: pointer to loaded initrd - * @load_size: size of loaded initrd * @soft_limit: preferred address for loading the initrd * @hard_limit: upper limit address for loading the initrd * * 
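Both the LoadFile2 path above and the memory-map changes later in this series rely on the standard UEFI probe-then-fetch idiom: call once with a zero-sized buffer, expect EFI_BUFFER_TOO_SMALL plus the required size, then call again with a real buffer. Sketched with the GetMemoryMap() boot service (illustrative, error handling elided):

    unsigned long size = 0, key, desc_size;
    u32 desc_ver;
    efi_status_t status;

    /* probe: NULL buffer, size 0 -> firmware reports the needed size */
    status = efi_bs_call(get_memory_map, &size, NULL, &key,
                         &desc_size, &desc_ver);
    if (status != EFI_BUFFER_TOO_SMALL)
            return EFI_LOAD_ERROR;  /* anything else is unexpected here */

    /* fetch: allocate at least 'size' bytes and repeat the call */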
Return: status code */ efi_status_t efi_load_initrd(efi_loaded_image_t *image, - unsigned long *load_addr, - unsigned long *load_size, unsigned long soft_limit, - unsigned long hard_limit) + unsigned long hard_limit, + const struct linux_efi_initrd **out) { - efi_status_t status; + efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID; + efi_status_t status = EFI_SUCCESS; + struct linux_efi_initrd initrd, *tbl; - if (!load_addr || !load_size) - return EFI_INVALID_PARAMETER; + if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || efi_noinitrd) + return EFI_SUCCESS; - status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit); + status = efi_load_initrd_dev_path(&initrd, hard_limit); if (status == EFI_SUCCESS) { efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n"); + if (initrd.size > 0 && + efi_measure_tagged_event(initrd.base, initrd.size, + EFISTUB_EVT_INITRD) == EFI_SUCCESS) + efi_info("Measured initrd data into PCR 9\n"); } else if (status == EFI_NOT_FOUND) { - status = efi_load_initrd_cmdline(image, load_addr, load_size, - soft_limit, hard_limit); - if (status == EFI_SUCCESS && *load_size > 0) + status = efi_load_initrd_cmdline(image, &initrd, soft_limit, + hard_limit); + /* command line loader disabled or no initrd= passed? */ + if (status == EFI_UNSUPPORTED || status == EFI_NOT_READY) + return EFI_SUCCESS; + if (status == EFI_SUCCESS) efi_info("Loaded initrd from command line option\n"); } + if (status != EFI_SUCCESS) + goto failed; + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd), + (void **)&tbl); + if (status != EFI_SUCCESS) + goto free_initrd; + + *tbl = initrd; + status = efi_bs_call(install_configuration_table, &tbl_guid, tbl); + if (status != EFI_SUCCESS) + goto free_tbl; + + if (out) + *out = tbl; + return EFI_SUCCESS; +free_tbl: + efi_bs_call(free_pool, tbl); +free_initrd: + efi_free(initrd.size, initrd.base); +failed: + efi_err("Failed to load initrd: 0x%lx\n", status); return status; } diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 26e69788f27a..cf474f0dd261 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -10,7 +10,6 @@ */ #include <linux/efi.h> -#include <linux/libfdt.h> #include <asm/efi.h> #include "efistub.h" @@ -40,14 +39,22 @@ #ifdef CONFIG_ARM64 # define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64 -#else +#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH) +# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN +#else /* Only if TASK_SIZE is a constant */ # define EFI_RT_VIRTUAL_LIMIT TASK_SIZE #endif -static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; -static bool flat_va_mapping; +/* + * Some architectures map the EFI regions into the kernel's linear map using a + * fixed offset. 
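With this change the stub publishes the initrd location as a configuration table keyed by LINUX_EFI_INITRD_MEDIA_GUID rather than via architecture-specific channels. A consumer-side sketch, assuming a hypothetical initrd_tbl_pa obtained by scanning the EFI configuration tables for that GUID (this is not the literal kernel-side code):

    struct linux_efi_initrd {
            unsigned long base;
            unsigned long size;
    };

    static void locate_stub_initrd(void)
    {
            struct linux_efi_initrd *tbl;

            /* initrd_tbl_pa: physical address of the installed table (assumption) */
            tbl = early_memremap(initrd_tbl_pa, sizeof(*tbl));
            if (!tbl)
                    return;

            phys_initrd_start = tbl->base;
            phys_initrd_size  = tbl->size;
            early_memunmap(tbl, sizeof(*tbl));
    }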
+ */ +#ifndef EFI_RT_VIRTUAL_OFFSET +#define EFI_RT_VIRTUAL_OFFSET 0 +#endif -const efi_system_table_t *efi_system_table; +static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; +static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0); static struct screen_info *setup_graphics(void) { @@ -122,19 +129,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, unsigned long image_addr; unsigned long image_size = 0; /* addr/point and size pairs for memory management*/ - unsigned long initrd_addr = 0; - unsigned long initrd_size = 0; - unsigned long fdt_addr = 0; /* Original DTB */ - unsigned long fdt_size = 0; char *cmdline_ptr = NULL; int cmdline_size = 0; efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID; unsigned long reserve_addr = 0; unsigned long reserve_size = 0; - enum efi_secureboot_mode secure_boot; struct screen_info *si; efi_properties_table_t *prop_tbl; - unsigned long max_addr; efi_system_table = sys_table_arg; @@ -153,8 +154,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, * information about the running image, such as size and the command * line. */ - status = efi_system_table->boottime->handle_protocol(handle, - &loaded_image_proto, (void *)&image); + status = efi_bs_call(handle_protocol, handle, &loaded_image_proto, + (void *)&image); if (status != EFI_SUCCESS) { efi_err("Failed to get loaded image protocol\n"); goto fail; @@ -197,7 +198,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, status = handle_kernel_image(&image_addr, &image_size, &reserve_addr, &reserve_size, - image); + image, handle); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); goto fail_free_screeninfo; @@ -208,45 +209,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, /* Ask the firmware to clear memory on unclean shutdown */ efi_enable_reset_attack_mitigation(); - secure_boot = efi_get_secureboot(); - - /* - * Unauthenticated device tree data is a security hazard, so ignore - * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure - * boot is enabled if we can't determine its state. - */ - if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || - secure_boot != efi_secureboot_mode_disabled) { - if (strstr(cmdline_ptr, "dtb=")) - efi_err("Ignoring DTB from command line.\n"); - } else { - status = efi_load_dtb(image, &fdt_addr, &fdt_size); - - if (status != EFI_SUCCESS) { - efi_err("Failed to load device tree!\n"); - goto fail_free_image; - } - } - - if (fdt_addr) { - efi_info("Using DTB from command line\n"); - } else { - /* Look for a device tree configuration table entry. */ - fdt_addr = (uintptr_t)get_fdt(&fdt_size); - if (fdt_addr) - efi_info("Using DTB from configuration table\n"); - } - - if (!fdt_addr) - efi_info("Generating empty DTB\n"); - - if (!efi_noinitrd) { - max_addr = efi_get_max_initrd_addr(image_addr); - status = efi_load_initrd(image, &initrd_addr, &initrd_size, - ULONG_MAX, max_addr); - if (status != EFI_SUCCESS) - efi_err("Failed to load initrd!\n"); - } + efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr), + NULL); efi_random_get_seed(); @@ -258,8 +222,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, * The easiest way to achieve that is to simply use a 1:1 mapping. 
*/ prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID); - flat_va_mapping = prop_tbl && - (prop_tbl->memory_protection_attribute & + flat_va_mapping |= prop_tbl && + (prop_tbl->memory_protection_attribute & EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA); /* force efi_novamap if SetVirtualAddressMap() is unsupported */ @@ -288,25 +252,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, install_memreserve_table(); - status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr, - initrd_addr, initrd_size, - cmdline_ptr, fdt_addr, fdt_size); - if (status != EFI_SUCCESS) - goto fail_free_initrd; + status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr); - if (IS_ENABLED(CONFIG_ARM)) - efi_handle_post_ebs_state(); - - efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr)); - /* not reached */ - -fail_free_initrd: - efi_err("Failed to update FDT and exit boot services\n"); - - efi_free(initrd_size, initrd_addr); - efi_free(fdt_size, fdt_addr); - -fail_free_image: efi_free(image_size, image_addr); efi_free(reserve_size, reserve_addr); fail_free_screeninfo: @@ -318,6 +265,35 @@ fail: } /* + * efi_allocate_virtmap() - create a pool allocation for the virtmap + * + * Create an allocation that is of sufficient size to hold all the memory + * descriptors that will be passed to SetVirtualAddressMap() to inform the + * firmware about the virtual mapping that will be used under the OS to call + * into the firmware. + */ +efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap, + unsigned long *desc_size, u32 *desc_ver) +{ + unsigned long size, mmap_key; + efi_status_t status; + + /* + * Use the size of the current memory map as an upper bound for the + * size of the buffer we need to pass to SetVirtualAddressMap() to + * cover all EFI_MEMORY_RUNTIME regions. + */ + size = 0; + status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size, + desc_ver); + if (status != EFI_BUFFER_TOO_SMALL) + return EFI_LOAD_ERROR; + + return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)virtmap); +} + +/* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * * This function populates the virt_addr fields of all memory region descriptors @@ -332,6 +308,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, efi_memory_desc_t *in, *out = runtime_map; int l; + *count = 0; + for (l = 0; l < map_size; l += desc_size) { u64 paddr, size; @@ -342,7 +320,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, paddr = in->phys_addr; size = in->num_pages * EFI_PAGE_SIZE; - in->virt_addr = in->phys_addr; + in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET; if (efi_novamap) { continue; } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index cde0a2ef507d..eb03d5a9aac8 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -31,12 +31,14 @@ extern bool efi_nochunk; extern bool efi_nokaslr; -extern bool efi_noinitrd; extern int efi_loglevel; extern bool efi_novamap; extern const efi_system_table_t *efi_system_table; +typedef union efi_dxe_services_table efi_dxe_services_table_t; +extern const efi_dxe_services_table_t *efi_dxe_table; + efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg); @@ -45,6 +47,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, #define efi_is_native() (true) #define efi_bs_call(func, ...) 
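efi_alloc_virtmap() above sizes its buffer from a GetMemoryMap() probe. That is a safe upper bound because SetVirtualAddressMap() needs one descriptor per EFI_MEMORY_RUNTIME region, and every such region already occupies at least one descriptor in the current map. A sketch of the counting argument:

    static int count_runtime_regions(void *map, unsigned long map_size,
                                     unsigned long desc_size)
    {
            unsigned long off;
            int n = 0;

            for (off = 0; off < map_size; off += desc_size) {
                    efi_memory_desc_t *md = map + off;

                    if (md->attribute & EFI_MEMORY_RUNTIME)
                            n++;    /* never exceeds the full map's entry count */
            }
            return n;
    }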
efi_system_table->boottime->func(__VA_ARGS__) #define efi_rt_call(func, ...) efi_system_table->runtime->func(__VA_ARGS__) +#define efi_dxe_call(func, ...) efi_dxe_table->func(__VA_ARGS__) #define efi_table_attr(inst, attr) (inst->attr) #define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__) @@ -157,16 +160,24 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi) */ #define EFI_MMAP_NR_SLACK_SLOTS 8 -struct efi_boot_memmap { - efi_memory_desc_t **map; - unsigned long *map_size; - unsigned long *desc_size; - u32 *desc_ver; - unsigned long *key_ptr; - unsigned long *buff_size; +typedef struct efi_generic_dev_path efi_device_path_protocol_t; + +union efi_device_path_to_text_protocol { + struct { + efi_char16_t *(__efiapi *convert_device_node_to_text)( + const efi_device_path_protocol_t *, + bool, bool); + efi_char16_t *(__efiapi *convert_device_path_to_text)( + const efi_device_path_protocol_t *, + bool, bool); + }; + struct { + u32 convert_device_node_to_text; + u32 convert_device_path_to_text; + } mixed_mode; }; -typedef struct efi_generic_dev_path efi_device_path_protocol_t; +typedef union efi_device_path_to_text_protocol efi_device_path_to_text_protocol_t; typedef void *efi_event_t; /* Note that notifications won't work in mixed mode */ @@ -251,13 +262,17 @@ union efi_boot_services { efi_handle_t *); efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *, void *); - void *load_image; - void *start_image; + efi_status_t (__efiapi *load_image)(bool, efi_handle_t, + efi_device_path_protocol_t *, + void *, unsigned long, + efi_handle_t *); + efi_status_t (__efiapi *start_image)(efi_handle_t, unsigned long *, + efi_char16_t **); efi_status_t __noreturn (__efiapi *exit)(efi_handle_t, efi_status_t, unsigned long, efi_char16_t *); - void *unload_image; + efi_status_t (__efiapi *unload_image)(efi_handle_t); efi_status_t (__efiapi *exit_boot_services)(efi_handle_t, unsigned long); void *get_next_monotonic_count; @@ -274,11 +289,11 @@ union efi_boot_services { void *locate_handle_buffer; efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *, void **); - void *install_multiple_protocol_interfaces; - void *uninstall_multiple_protocol_interfaces; + efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...); + efi_status_t (__efiapi *uninstall_multiple_protocol_interfaces)(efi_handle_t, ...); void *calculate_crc32; - void *copy_mem; - void *set_mem; + void (__efiapi *copy_mem)(void *, const void *, unsigned long); + void (__efiapi *set_mem)(void *, unsigned long, unsigned char); void *create_event_ex; }; struct { @@ -330,6 +345,76 @@ union efi_boot_services { } mixed_mode; }; +typedef enum { + EfiGcdMemoryTypeNonExistent, + EfiGcdMemoryTypeReserved, + EfiGcdMemoryTypeSystemMemory, + EfiGcdMemoryTypeMemoryMappedIo, + EfiGcdMemoryTypePersistent, + EfiGcdMemoryTypeMoreReliable, + EfiGcdMemoryTypeMaximum +} efi_gcd_memory_type_t; + +typedef struct { + efi_physical_addr_t base_address; + u64 length; + u64 capabilities; + u64 attributes; + efi_gcd_memory_type_t gcd_memory_type; + void *image_handle; + void *device_handle; +} efi_gcd_memory_space_desc_t; + +/* + * EFI DXE Services table + */ +union efi_dxe_services_table { + struct { + efi_table_hdr_t hdr; + void *add_memory_space; + void *allocate_memory_space; + void *free_memory_space; + void *remove_memory_space; + efi_status_t (__efiapi *get_memory_space_descriptor)(efi_physical_addr_t, + efi_gcd_memory_space_desc_t *); + efi_status_t (__efiapi *set_memory_space_attributes)(efi_physical_addr_t, + 
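All protocol and table definitions in efistub.h follow the mixed-mode convention visible here: a union whose anonymous struct holds native function pointers and whose mixed_mode struct holds u32 slots, because 32-bit (IA-32) firmware lays out 4-byte pointers even when the kernel is 64-bit. Minimal illustration (example_protocol is made up for the sketch):

    union example_protocol {
            struct {
                    efi_status_t (__efiapi *frob)(void *);
            };
            struct {
                    u32 frob;       /* 4-byte pointer as stored by 32-bit firmware */
            } mixed_mode;
    };

Which member is valid is decided at run time, e.g. via efi_is_native().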
u64, u64); + void *get_memory_space_map; + void *add_io_space; + void *allocate_io_space; + void *free_io_space; + void *remove_io_space; + void *get_io_space_descriptor; + void *get_io_space_map; + void *dispatch; + void *schedule; + void *trust; + void *process_firmware_volume; + void *set_memory_space_capabilities; + }; + struct { + efi_table_hdr_t hdr; + u32 add_memory_space; + u32 allocate_memory_space; + u32 free_memory_space; + u32 remove_memory_space; + u32 get_memory_space_descriptor; + u32 set_memory_space_attributes; + u32 get_memory_space_map; + u32 add_io_space; + u32 allocate_io_space; + u32 free_io_space; + u32 remove_io_space; + u32 get_io_space_descriptor; + u32 get_io_space_map; + u32 dispatch; + u32 schedule; + u32 trust; + u32 process_firmware_volume; + u32 set_memory_space_capabilities; + } mixed_mode; +}; + typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t; union efi_uga_draw_protocol { @@ -667,6 +752,30 @@ union apple_properties_protocol { typedef u32 efi_tcg2_event_log_format; +#define INITRD_EVENT_TAG_ID 0x8F3B22ECU +#define LOAD_OPTIONS_EVENT_TAG_ID 0x8F3B22EDU +#define EV_EVENT_TAG 0x00000006U +#define EFI_TCG2_EVENT_HEADER_VERSION 0x1 + +struct efi_tcg2_event { + u32 event_size; + struct { + u32 header_size; + u16 header_version; + u32 pcr_index; + u32 event_type; + } __packed event_header; + /* u8[] event follows here */ +} __packed; + +struct efi_tcg2_tagged_event { + u32 tagged_event_id; + u32 tagged_event_data_size; + /* u8 tagged event data follows here */ +} __packed; + +typedef struct efi_tcg2_event efi_tcg2_event_t; +typedef struct efi_tcg2_tagged_event efi_tcg2_tagged_event_t; typedef union efi_tcg2_protocol efi_tcg2_protocol_t; union efi_tcg2_protocol { @@ -677,7 +786,11 @@ union efi_tcg2_protocol { efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); - void *hash_log_extend_event; + efi_status_t (__efiapi *hash_log_extend_event)(efi_tcg2_protocol_t *, + u64, + efi_physical_addr_t, + u64, + const efi_tcg2_event_t *); void *submit_command; void *get_active_pcr_banks; void *set_active_pcr_banks; @@ -694,6 +807,13 @@ union efi_tcg2_protocol { } mixed_mode; }; +struct riscv_efi_boot_protocol { + u64 revision; + + efi_status_t (__efiapi *get_boot_hartid)(struct riscv_efi_boot_protocol *, + unsigned long *boot_hartid); +}; + typedef union efi_load_file_protocol efi_load_file_protocol_t; typedef union efi_load_file_protocol efi_load_file2_protocol_t; @@ -733,7 +853,7 @@ typedef struct { u16 file_path_list_length; const efi_char16_t *description; const efi_device_path_protocol_t *file_path_list; - size_t optional_data_size; + u32 optional_data_size; const void *optional_data; } efi_load_option_unpacked_t; @@ -743,20 +863,16 @@ typedef efi_status_t (*efi_exit_boot_map_processing)( struct efi_boot_memmap *map, void *priv); -efi_status_t efi_exit_boot_services(void *handle, - struct efi_boot_memmap *map, - void *priv, +efi_status_t efi_exit_boot_services(void *handle, void *priv, efi_exit_boot_map_processing priv_func); -efi_status_t allocate_new_fdt_and_exit_boot(void *handle, - unsigned long *new_fdt_addr, - u64 initrd_addr, u64 initrd_size, - char *cmdline_ptr, - unsigned long fdt_addr, - unsigned long fdt_size); +efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, + unsigned long kernel_addr, char *cmdline_ptr); void *get_fdt(unsigned long *fdt_size); +efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap, + unsigned long *desc_size, u32 *desc_ver); void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long 
map_size, unsigned long desc_size, efi_memory_desc_t *runtime_map, int *count); @@ -778,11 +894,12 @@ __printf(1, 2) int efi_printk(char const *fmt, ...); void efi_free(unsigned long size, unsigned long addr); -void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size); +void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size); char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len); -efi_status_t efi_get_memory_map(struct efi_boot_memmap *map); +efi_status_t efi_get_memory_map(struct efi_boot_memmap **map, + bool install_cfg_tbl); efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr, unsigned long max); @@ -825,10 +942,9 @@ static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image, } efi_status_t efi_load_initrd(efi_loaded_image_t *image, - unsigned long *load_addr, - unsigned long *load_size, unsigned long soft_limit, - unsigned long hard_limit); + unsigned long hard_limit, + const struct linux_efi_initrd **out); /* * This function handles the architcture specific differences between arm and * arm64 regarding where the kernel image must be loaded and any memory that @@ -839,7 +955,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, - efi_loaded_image_t *image); + efi_loaded_image_t *image, + efi_handle_t image_handle); asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, unsigned long fdt_addr, @@ -858,4 +975,32 @@ efi_enable_reset_attack_mitigation(void) { } void efi_retrieve_tpm2_eventlog(void); +struct efi_smbios_record { + u8 type; + u8 length; + u16 handle; +}; + +struct efi_smbios_type1_record { + struct efi_smbios_record header; + + u8 manufacturer; + u8 product_name; + u8 version; + u8 serial_number; + efi_guid_t uuid; + u8 wakeup_type; + u8 sku_number; + u8 family; +}; + +#define efi_get_smbios_string(__type, __name) ({ \ + int size = sizeof(struct efi_smbios_type ## __type ## _record); \ + int off = offsetof(struct efi_smbios_type ## __type ## _record, \ + __name); \ + __efi_get_smbios_string(__type, off, size); \ +}) + +const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize); + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index fe567be0f118..70e9789ff9de 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -28,8 +28,7 @@ static void fdt_update_cell_size(void *fdt) } static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, - void *fdt, int new_fdt_size, char *cmdline_ptr, - u64 initrd_addr, u64 initrd_size) + void *fdt, int new_fdt_size, char *cmdline_ptr) { int node, num_rsv; int status; @@ -93,21 +92,6 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, goto fdt_set_fail; } - /* Set initrd address/end in device tree, if present */ - if (initrd_size != 0) { - u64 initrd_image_end; - u64 initrd_image_start = cpu_to_fdt64(initrd_addr); - - status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start); - if (status) - goto fdt_set_fail; - - initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size); - status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end); - if (status) - goto fdt_set_fail; - } - /* Add FDT entries for EFI runtime services in chosen node. 
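The efi_get_smbios_string() macro above reduces a typed member reference to the (offset, record size) pair the string walker needs; the member itself stores a 1-based index into the string-set that follows the formatted record. Expansion sketch for the type-1 product name:

    const u8 *name = efi_get_smbios_string(1, product_name);

    /* expands, roughly, to: */
    const u8 *name2 = __efi_get_smbios_string(1,
                    offsetof(struct efi_smbios_type1_record, product_name),
                    sizeof(struct efi_smbios_type1_record));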
*/ node = fdt_subnode_offset(fdt, 0, "chosen"); fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table); @@ -170,25 +154,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) if (node < 0) return EFI_LOAD_ERROR; - fdt_val64 = cpu_to_fdt64((unsigned long)*map->map); + fdt_val64 = cpu_to_fdt64((unsigned long)map->map); err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64); if (err) return EFI_LOAD_ERROR; - fdt_val32 = cpu_to_fdt32(*map->map_size); + fdt_val32 = cpu_to_fdt32(map->map_size); err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32); if (err) return EFI_LOAD_ERROR; - fdt_val32 = cpu_to_fdt32(*map->desc_size); + fdt_val32 = cpu_to_fdt32(map->desc_size); err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32); if (err) return EFI_LOAD_ERROR; - fdt_val32 = cpu_to_fdt32(*map->desc_ver); + fdt_val32 = cpu_to_fdt32(map->desc_ver); err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32); if (err) @@ -198,22 +182,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) } struct exit_boot_struct { + struct efi_boot_memmap *boot_memmap; efi_memory_desc_t *runtime_map; - int *runtime_entry_count; + int runtime_entry_count; void *new_fdt_addr; }; -static efi_status_t exit_boot_func(struct efi_boot_memmap *map, - void *priv) +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) { struct exit_boot_struct *p = priv; + + p->boot_memmap = map; + /* * Update the memory map with virtual addresses. The function will also * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME * entries so that we can pass it straight to SetVirtualAddressMap() */ - efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, - p->runtime_map, p->runtime_entry_count); + efi_get_virtmap(map->map, map->map_size, map->desc_size, + p->runtime_map, &p->runtime_entry_count); return update_fdt_memmap(p->new_fdt_addr, map); } @@ -223,86 +210,86 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, #endif /* - * Allocate memory for a new FDT, then add EFI, commandline, and - * initrd related fields to the FDT. This routine increases the - * FDT allocation size until the allocated memory is large - * enough. EFI allocations are in EFI_PAGE_SIZE granules, - * which are fixed at 4K bytes, so in most cases the first - * allocation should succeed. - * EFI boot services are exited at the end of this function. - * There must be no allocations between the get_memory_map() - * call and the exit_boot_services() call, so the exiting of - * boot services is very tightly tied to the creation of the FDT - * with the final memory map in it. + * Allocate memory for a new FDT, then add EFI and commandline related fields + * to the FDT. This routine increases the FDT allocation size until the + * allocated memory is large enough. EFI allocations are in EFI_PAGE_SIZE + * granules, which are fixed at 4K bytes, so in most cases the first allocation + * should succeed. EFI boot services are exited at the end of this function. + * There must be no allocations between the get_memory_map() call and the + * exit_boot_services() call, so the exiting of boot services is very tightly + * tied to the creation of the FDT with the final memory map in it. 
*/ - +static efi_status_t allocate_new_fdt_and_exit_boot(void *handle, + efi_loaded_image_t *image, unsigned long *new_fdt_addr, - u64 initrd_addr, u64 initrd_size, - char *cmdline_ptr, - unsigned long fdt_addr, - unsigned long fdt_size) + char *cmdline_ptr) { - unsigned long map_size, desc_size, buff_size; + unsigned long desc_size; u32 desc_ver; - unsigned long mmap_key; - efi_memory_desc_t *memory_map, *runtime_map; efi_status_t status; - int runtime_entry_count; - struct efi_boot_memmap map; struct exit_boot_struct priv; + unsigned long fdt_addr = 0; + unsigned long fdt_size = 0; - map.map = &runtime_map; - map.map_size = &map_size; - map.desc_size = &desc_size; - map.desc_ver = &desc_ver; - map.key_ptr = &mmap_key; - map.buff_size = &buff_size; + if (!efi_novamap) { + status = efi_alloc_virtmap(&priv.runtime_map, &desc_size, + &desc_ver); + if (status != EFI_SUCCESS) { + efi_err("Unable to retrieve UEFI memory map.\n"); + return status; + } + } /* - * Get a copy of the current memory map that we will use to prepare - * the input for SetVirtualAddressMap(). We don't have to worry about - * subsequent allocations adding entries, since they could not affect - * the number of EFI_MEMORY_RUNTIME regions. + * Unauthenticated device tree data is a security hazard, so ignore + * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure + * boot is enabled if we can't determine its state. */ - status = efi_get_memory_map(&map); - if (status != EFI_SUCCESS) { - efi_err("Unable to retrieve UEFI memory map.\n"); - return status; + if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || + efi_get_secureboot() != efi_secureboot_mode_disabled) { + if (strstr(cmdline_ptr, "dtb=")) + efi_err("Ignoring DTB from command line.\n"); + } else { + status = efi_load_dtb(image, &fdt_addr, &fdt_size); + + if (status != EFI_SUCCESS && status != EFI_NOT_READY) { + efi_err("Failed to load device tree!\n"); + goto fail; + } } + if (fdt_addr) { + efi_info("Using DTB from command line\n"); + } else { + /* Look for a device tree configuration table entry. */ + fdt_addr = (uintptr_t)get_fdt(&fdt_size); + if (fdt_addr) + efi_info("Using DTB from configuration table\n"); + } + + if (!fdt_addr) + efi_info("Generating empty DTB\n"); + efi_info("Exiting boot services...\n"); - map.map = &memory_map; status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX); if (status != EFI_SUCCESS) { efi_err("Unable to allocate memory for new device tree.\n"); goto fail; } - /* - * Now that we have done our final memory allocation (and free) - * we can get the memory map key needed for exit_boot_services(). 
- */ - status = efi_get_memory_map(&map); - if (status != EFI_SUCCESS) - goto fail_free_new_fdt; - status = update_fdt((void *)fdt_addr, fdt_size, - (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, - initrd_addr, initrd_size); + (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr); if (status != EFI_SUCCESS) { efi_err("Unable to construct new device tree.\n"); goto fail_free_new_fdt; } - runtime_entry_count = 0; - priv.runtime_map = runtime_map; - priv.runtime_entry_count = &runtime_entry_count; - priv.new_fdt_addr = (void *)*new_fdt_addr; + priv.new_fdt_addr = (void *)*new_fdt_addr; - status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); + status = efi_exit_boot_services(handle, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; @@ -312,8 +299,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, /* Install the new virtual address map */ svam = efi_system_table->runtime->set_virtual_address_map; - status = svam(runtime_entry_count * desc_size, desc_size, - desc_ver, runtime_map); + status = svam(priv.runtime_entry_count * desc_size, desc_size, + desc_ver, priv.runtime_map); /* * We are beyond the point of no return here, so if the call to @@ -321,19 +308,21 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, * incoming kernel but proceed normally otherwise. */ if (status != EFI_SUCCESS) { + efi_memory_desc_t *p; int l; /* * Set the virtual address field of all - * EFI_MEMORY_RUNTIME entries to 0. This will signal - * the incoming kernel that no virtual translation has - * been installed. + * EFI_MEMORY_RUNTIME entries to U64_MAX. This will + * signal the incoming kernel that no virtual + * translation has been installed. */ - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *p = (void *)memory_map + l; + for (l = 0; l < priv.boot_memmap->map_size; + l += priv.boot_memmap->desc_size) { + p = (void *)priv.boot_memmap->map + l; if (p->attribute & EFI_MEMORY_RUNTIME) - p->virt_addr = 0; + p->virt_addr = U64_MAX; } } return EFI_SUCCESS; @@ -345,11 +334,33 @@ fail_free_new_fdt: efi_free(MAX_FDT_SIZE, *new_fdt_addr); fail: - efi_system_table->boottime->free_pool(runtime_map); + efi_free(fdt_size, fdt_addr); + + efi_bs_call(free_pool, priv.runtime_map); return EFI_LOAD_ERROR; } +efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, + unsigned long kernel_addr, char *cmdline_ptr) +{ + unsigned long fdt_addr; + efi_status_t status; + + status = allocate_new_fdt_and_exit_boot(handle, image, &fdt_addr, + cmdline_ptr); + if (status != EFI_SUCCESS) { + efi_err("Failed to update FDT and exit boot services\n"); + return status; + } + + if (IS_ENABLED(CONFIG_ARM)) + efi_handle_post_ebs_state(); + + efi_enter_kernel(kernel_addr, fdt_addr, fdt_totalsize((void *)fdt_addr)); + /* not reached */ +} + void *get_fdt(unsigned long *fdt_size) { void *fdt; diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c index dd95f330fe6e..f756c61396e9 100644 --- a/drivers/firmware/efi/libstub/file.c +++ b/drivers/firmware/efi/libstub/file.c @@ -66,10 +66,28 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume, static efi_status_t efi_open_volume(efi_loaded_image_t *image, efi_file_protocol_t **fh) { + struct efi_vendor_dev_path *dp = image->file_path; + efi_guid_t li_proto = LOADED_IMAGE_PROTOCOL_GUID; efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; efi_simple_file_system_protocol_t *io; efi_status_t status; + // If we are using EFI zboot, we should look for the file system + // 
protocol on the parent image's handle instead + if (IS_ENABLED(CONFIG_EFI_ZBOOT) && + image->parent_handle != NULL && + dp != NULL && + dp->header.type == EFI_DEV_MEDIA && + dp->header.sub_type == EFI_DEV_MEDIA_VENDOR && + !efi_guidcmp(dp->vendorguid, LINUX_EFI_ZBOOT_MEDIA_GUID)) { + status = efi_bs_call(handle_protocol, image->parent_handle, + &li_proto, (void *)&image); + if (status != EFI_SUCCESS) { + efi_err("Failed to locate parent image handle\n"); + return status; + } + } + status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto, (void **)&io); if (status != EFI_SUCCESS) { @@ -136,7 +154,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, unsigned long *load_size) { const efi_char16_t *cmdline = image->load_options; - int cmdline_len = image->load_options_size; + u32 cmdline_len = image->load_options_size; unsigned long efi_chunk_size = ULONG_MAX; efi_file_protocol_t *volume = NULL; efi_file_protocol_t *file; @@ -238,6 +256,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image, if (volume) volume->close(volume); + + if (*load_size == 0) + return EFI_NOT_READY; return EFI_SUCCESS; err_close_file: diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c new file mode 100644 index 000000000000..a04ab39292b6 --- /dev/null +++ b/drivers/firmware/efi/libstub/intrinsics.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <asm/efi.h> +#include <asm/string.h> + +#include "efistub.h" + +#ifdef CONFIG_KASAN +#undef memcpy +#undef memmove +#undef memset +void *__memcpy(void *__dest, const void *__src, size_t __n) __alias(memcpy); +void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove); +void *__memset(void *s, int c, size_t count) __alias(memset); +#endif + +void *memcpy(void *dst, const void *src, size_t len) +{ + efi_bs_call(copy_mem, dst, src, len); + return dst; +} + +extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy); + +void *memset(void *dst, int c, size_t len) +{ + efi_bs_call(set_mem, dst, len, c & U8_MAX); + return dst; +} diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c new file mode 100644 index 000000000000..32329f2a92f9 --- /dev/null +++ b/drivers/firmware/efi/libstub/loongarch-stub.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Yun Liu <liuyun@loongson.cn> + * Huacai Chen <chenhuacai@loongson.cn> + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ + +#include <asm/efi.h> +#include <asm/addrspace.h> +#include "efistub.h" + +typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline, + unsigned long systab); + +extern int kernel_asize; +extern int kernel_fsize; +extern int kernel_offset; +extern kernel_entry_t kernel_entry; + +efi_status_t check_platform_features(void) +{ + return EFI_SUCCESS; +} + +efi_status_t handle_kernel_image(unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + efi_loaded_image_t *image, + efi_handle_t image_handle) +{ + efi_status_t status; + unsigned long kernel_addr = 0; + + kernel_addr = (unsigned long)&kernel_offset - kernel_offset; + + status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize, + PHYSADDR(VMLINUX_LOAD_ADDRESS), SZ_2M, 0x0); + + *image_addr = kernel_addr; + *image_size = kernel_asize; + + return status; +} + +struct exit_boot_struct { + efi_memory_desc_t *runtime_map; + int 
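One constraint worth noting on intrinsics.c above: because memcpy()/memset() are forwarded to the CopyMem()/SetMem() boot services, they are only usable while boot services are still live. Sketch of the hazard (illustrative fragment, identifiers assumed):

    memcpy(dst, src, len);                          /* fine: pre-ExitBootServices */
    efi_bs_call(exit_boot_services, handle, key);
    /* memcpy(dst, src, len);  would now call into torn-down firmware */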
runtime_entry_count; +}; + +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) +{ + struct exit_boot_struct *p = priv; + + /* + * Update the memory map with virtual addresses. The function will also + * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME + * entries so that we can pass it straight to SetVirtualAddressMap() + */ + efi_get_virtmap(map->map, map->map_size, map->desc_size, + p->runtime_map, &p->runtime_entry_count); + + return EFI_SUCCESS; +} + +efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, + unsigned long kernel_addr, char *cmdline_ptr) +{ + kernel_entry_t real_kernel_entry; + struct exit_boot_struct priv; + unsigned long desc_size; + efi_status_t status; + u32 desc_ver; + + status = efi_alloc_virtmap(&priv.runtime_map, &desc_size, &desc_ver); + if (status != EFI_SUCCESS) { + efi_err("Unable to retrieve UEFI memory map.\n"); + return status; + } + + efi_info("Exiting boot services\n"); + + efi_novamap = false; + status = efi_exit_boot_services(handle, &priv, exit_boot_func); + if (status != EFI_SUCCESS) + return status; + + /* Install the new virtual address map */ + efi_rt_call(set_virtual_address_map, + priv.runtime_entry_count * desc_size, desc_size, + desc_ver, priv.runtime_map); + + /* Config Direct Mapping */ + csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); + csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + + real_kernel_entry = (kernel_entry_t) + ((unsigned long)&kernel_entry - kernel_addr + VMLINUX_LOAD_ADDRESS); + + real_kernel_entry(true, (unsigned long)cmdline_ptr, + (unsigned long)efi_system_table); +} diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c index feef8d4be113..45841ef55a9f 100644 --- a/drivers/firmware/efi/libstub/mem.c +++ b/drivers/firmware/efi/libstub/mem.c @@ -5,71 +5,66 @@ #include "efistub.h" -static inline bool mmap_has_headroom(unsigned long buff_size, - unsigned long map_size, - unsigned long desc_size) -{ - unsigned long slack = buff_size - map_size; - - return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; -} - /** * efi_get_memory_map() - get memory map - * @map: on return pointer to memory map + * @map: pointer to memory map pointer to which to assign the + * newly allocated memory map + * @install_cfg_tbl: whether or not to install the boot memory map as a + * configuration table * * Retrieve the UEFI memory map. The allocated memory leaves room for * up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries. * * Return: status code */ -efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) +efi_status_t efi_get_memory_map(struct efi_boot_memmap **map, + bool install_cfg_tbl) { - efi_memory_desc_t *m = NULL; + int memtype = install_cfg_tbl ? 
EFI_ACPI_RECLAIM_MEMORY + : EFI_LOADER_DATA; + efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID; + struct efi_boot_memmap *m, tmp; efi_status_t status; - unsigned long key; - u32 desc_version; - - *map->desc_size = sizeof(*m); - *map->map_size = *map->desc_size * 32; - *map->buff_size = *map->map_size; -again: - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, - *map->map_size, (void **)&m); + unsigned long size; + + tmp.map_size = 0; + status = efi_bs_call(get_memory_map, &tmp.map_size, NULL, &tmp.map_key, + &tmp.desc_size, &tmp.desc_ver); + if (status != EFI_BUFFER_TOO_SMALL) + return EFI_LOAD_ERROR; + + size = tmp.map_size + tmp.desc_size * EFI_MMAP_NR_SLACK_SLOTS; + status = efi_bs_call(allocate_pool, memtype, sizeof(*m) + size, + (void **)&m); if (status != EFI_SUCCESS) - goto fail; - - *map->desc_size = 0; - key = 0; - status = efi_bs_call(get_memory_map, map->map_size, m, - &key, map->desc_size, &desc_version); - if (status == EFI_BUFFER_TOO_SMALL || - !mmap_has_headroom(*map->buff_size, *map->map_size, - *map->desc_size)) { - efi_bs_call(free_pool, m); + return status; + + if (install_cfg_tbl) { /* - * Make sure there is some entries of headroom so that the - * buffer can be reused for a new map after allocations are - * no longer permitted. Its unlikely that the map will grow to - * exceed this headroom once we are ready to trigger - * ExitBootServices() + * Installing a configuration table might allocate memory, and + * this may modify the memory map. This means we should install + * the configuration table first, and re-install or delete it + * as needed. */ - *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS; - *map->buff_size = *map->map_size; - goto again; + status = efi_bs_call(install_configuration_table, &tbl_guid, m); + if (status != EFI_SUCCESS) + goto free_map; } - if (status == EFI_SUCCESS) { - if (map->key_ptr) - *map->key_ptr = key; - if (map->desc_ver) - *map->desc_ver = desc_version; - } else { - efi_bs_call(free_pool, m); - } + m->buff_size = m->map_size = size; + status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key, + &m->desc_size, &m->desc_ver); + if (status != EFI_SUCCESS) + goto uninstall_table; + + *map = m; + return EFI_SUCCESS; -fail: - *map->map = m; +uninstall_table: + if (install_cfg_tbl) + efi_bs_call(install_configuration_table, &tbl_guid, NULL); +free_map: + efi_bs_call(free_pool, m); return status; } diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 24aa37535372..33ab56769595 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -75,7 +75,12 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) return status; - status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + /* + * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the + * allocation will survive a kexec reboot (although we refresh the seed + * beforehand) + */ + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*seed) + EFI_RANDOM_SEED_SIZE, (void **)&seed); if (status != EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 724155b9e10d..9fb5869896be 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -55,21 +55,13 @@ efi_status_t efi_random_alloc(unsigned long size, unsigned long *addr, unsigned long random_seed) { - unsigned long map_size, desc_size, total_slots = 0, target_slot; - unsigned 
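Caller-side usage of the reworked efi_get_memory_map(), the pattern the randomalloc and relocate hunks below convert to: the whole map is now a single pool allocation owned by the caller, with the descriptors stored inline behind the header fields:

    struct efi_boot_memmap *map;
    efi_status_t status;

    status = efi_get_memory_map(&map, false);
    if (status != EFI_SUCCESS)
            return status;

    /* iterate: descriptors start at map->map, strided by map->desc_size */

    efi_bs_call(free_pool, map);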
long buff_size; + unsigned long total_slots = 0, target_slot; + unsigned long total_mirrored_slots = 0; + struct efi_boot_memmap *map; efi_status_t status; - efi_memory_desc_t *memory_map; int map_offset; - struct efi_boot_memmap map; - map.map = &memory_map; - map.map_size = &map_size; - map.desc_size = &desc_size; - map.desc_ver = NULL; - map.key_ptr = NULL; - map.buff_size = &buff_size; - - status = efi_get_memory_map(&map); + status = efi_get_memory_map(&map, false); if (status != EFI_SUCCESS) return status; @@ -79,15 +71,21 @@ efi_status_t efi_random_alloc(unsigned long size, size = round_up(size, EFI_ALLOC_ALIGN); /* count the suitable slots in each memory map entry */ - for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { - efi_memory_desc_t *md = (void *)memory_map + map_offset; + for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) { + efi_memory_desc_t *md = (void *)map->map + map_offset; unsigned long slots; slots = get_entry_num_slots(md, size, ilog2(align)); MD_NUM_SLOTS(md) = slots; total_slots += slots; + if (md->attribute & EFI_MEMORY_MORE_RELIABLE) + total_mirrored_slots += slots; } + /* consider only mirrored slots for randomization if any exist */ + if (total_mirrored_slots > 0) + total_slots = total_mirrored_slots; + /* find a random number between 0 and total_slots */ target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32; @@ -102,11 +100,15 @@ efi_status_t efi_random_alloc(unsigned long size, * to calculate the randomly chosen address, and allocate it directly * using EFI_ALLOCATE_ADDRESS. */ - for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { - efi_memory_desc_t *md = (void *)memory_map + map_offset; + for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) { + efi_memory_desc_t *md = (void *)map->map + map_offset; efi_physical_addr_t target; unsigned long pages; + if (total_mirrored_slots > 0 && + !(md->attribute & EFI_MEMORY_MORE_RELIABLE)) + continue; + if (target_slot >= MD_NUM_SLOTS(md)) { target_slot -= MD_NUM_SLOTS(md); continue; @@ -122,7 +124,7 @@ efi_status_t efi_random_alloc(unsigned long size, break; } - efi_bs_call(free_pool, memory_map); + efi_bs_call(free_pool, map); return status; } diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c index 8ee9eb2b9039..bf6fbd5d22a1 100644 --- a/drivers/firmware/efi/libstub/relocate.c +++ b/drivers/firmware/efi/libstub/relocate.c @@ -23,21 +23,12 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long *addr, unsigned long min) { - unsigned long map_size, desc_size, buff_size; - efi_memory_desc_t *map; + struct efi_boot_memmap *map; efi_status_t status; unsigned long nr_pages; int i; - struct efi_boot_memmap boot_map; - boot_map.map = ↦ - boot_map.map_size = &map_size; - boot_map.desc_size = &desc_size; - boot_map.desc_ver = NULL; - boot_map.key_ptr = NULL; - boot_map.buff_size = &buff_size; - - status = efi_get_memory_map(&boot_map); + status = efi_get_memory_map(&map, false); if (status != EFI_SUCCESS) goto fail; @@ -52,12 +43,12 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, size = round_up(size, EFI_ALLOC_ALIGN); nr_pages = size / EFI_PAGE_SIZE; - for (i = 0; i < map_size / desc_size; i++) { + for (i = 0; i < map->map_size / map->desc_size; i++) { efi_memory_desc_t *desc; - unsigned long m = (unsigned long)map; + unsigned long m = (unsigned long)map->map; u64 start, end; - desc = efi_early_memdesc_ptr(m, 
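The target_slot computation above maps a 32-bit seed onto [0, total_slots) with one multiply and a shift, avoiding a 64-bit division in the stub. Standalone demonstration of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pick_slot(uint64_t total_slots, uint32_t seed)
    {
            /* fixed-point scale: (total_slots * seed) / 2^32 */
            return (total_slots * (uint64_t)seed) >> 32;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)pick_slot(1000, 0));           /* 0   */
            printf("%llu\n", (unsigned long long)pick_slot(1000, 0xffffffffu)); /* 999 */
            return 0;
    }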
desc_size, i); + desc = efi_early_memdesc_ptr(m, map->desc_size, i); if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; @@ -87,7 +78,7 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, } } - if (i == map_size / desc_size) + if (i == map->map_size / map->desc_size) status = EFI_NOT_FOUND; efi_bs_call(free_pool, map); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e251399..b450ebf95977 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -8,6 +8,7 @@ #include <asm/efi.h> #include <asm/sections.h> +#include <asm/unaligned.h> #include "efistub.h" @@ -21,37 +22,63 @@ #define MIN_KIMG_ALIGN SZ_4M #endif -typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); +typedef void __noreturn (*jump_kernel_func)(unsigned long, unsigned long); -static u32 hartid; +static unsigned long hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; - const fdt32_t *prop; + const void *prop; fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); - if (!prop || len != sizeof(u32)) - return U32_MAX; + if (!prop) + return -EINVAL; - return fdt32_to_cpu(*prop); + if (len == sizeof(u32)) + hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop); + else if (len == sizeof(u64)) + hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop)); + else + return -EINVAL; + + return 0; +} + +static efi_status_t get_boot_hartid_from_efi(void) +{ + efi_guid_t boot_protocol_guid = RISCV_EFI_BOOT_PROTOCOL_GUID; + struct riscv_efi_boot_protocol *boot_protocol; + efi_status_t status; + + status = efi_bs_call(locate_protocol, &boot_protocol_guid, NULL, + (void **)&boot_protocol); + if (status != EFI_SUCCESS) + return status; + return efi_call_proto(boot_protocol, get_boot_hartid, &hartid); } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { - efi_err("/chosen/boot-hartid missing or invalid!\n"); - return EFI_UNSUPPORTED; + efi_status_t status; + int ret; + + status = get_boot_hartid_from_efi(); + if (status != EFI_SUCCESS) { + ret = get_boot_hartid_from_fdt(); + if (ret) { + efi_err("Failed to get boot hartid!\n"); + return EFI_UNSUPPORTED; + } } return EFI_SUCCESS; } @@ -77,7 +104,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, - efi_loaded_image_t *image) + efi_loaded_image_t *image, + efi_handle_t image_handle) { unsigned long kernel_size = 0; unsigned long preferred_addr; diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 8a18930f3eb6..516f4f0069bd 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -14,7 +14,7 @@ /* SHIM variables */ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; -static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; +static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, unsigned long *data_size, void *data) @@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* * See 
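The __get_unaligned_t() load in the RISC-V hunk above matters because FDT property payloads are only guaranteed 4-byte alignment, so an 8-byte boot-hartid value may legitimately sit on a 4-byte boundary. A portable equivalent of that read (byte swap shown for little-endian hosts):

    #include <stdint.h>
    #include <string.h>

    static uint64_t fdt64_load(const void *prop)
    {
            uint64_t be;

            memcpy(&be, prop, sizeof(be));  /* alignment-safe load */
            return __builtin_bswap64(be);   /* FDT values are big-endian */
    }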
if a user has put the shim into insecure mode. If so, and if the - * variable doesn't have the runtime attribute set, we might as well - * honor that. + * variable doesn't have the non-volatile attribute set, we might as + * well honor that. */ size = sizeof(moksbstate); status = get_efi_var(shim_MokSBState_name, &shim_guid, @@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* If it fails, we don't care why. Default to secure */ if (status != EFI_SUCCESS) goto secure_boot_enabled; - if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) return efi_secureboot_mode_disabled; secure_boot_enabled: diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c new file mode 100644 index 000000000000..460418b7f5f5 --- /dev/null +++ b/drivers/firmware/efi/libstub/smbios.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2022 Google LLC +// Author: Ard Biesheuvel <ardb@google.com> + +#include <linux/efi.h> + +#include "efistub.h" + +typedef struct efi_smbios_protocol efi_smbios_protocol_t; + +struct efi_smbios_protocol { + efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, + u16 *, struct efi_smbios_record *); + efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, + unsigned long *, u8 *); + efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); + efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, + struct efi_smbios_record **, + efi_handle_t *); + + u8 major_version; + u8 minor_version; +}; + +const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize) +{ + struct efi_smbios_record *record; + efi_smbios_protocol_t *smbios; + efi_status_t status; + u16 handle = 0xfffe; + const u8 *strtable; + + status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL, + (void **)&smbios) ?: + efi_call_proto(smbios, get_next, &handle, &type, &record, NULL); + if (status != EFI_SUCCESS) + return NULL; + + strtable = (u8 *)record + recsize; + for (int i = 1; i < ((u8 *)record)[offset]; i++) { + int len = strlen(strtable); + + if (!len) + return NULL; + strtable += len + 1; + } + return strtable; +} diff --git a/drivers/firmware/efi/libstub/systable.c b/drivers/firmware/efi/libstub/systable.c new file mode 100644 index 000000000000..91d016b02f8c --- /dev/null +++ b/drivers/firmware/efi/libstub/systable.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <asm/efi.h> + +#include "efistub.h" + +const efi_system_table_t *efi_system_table; diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index f14c4ff5839f..33a7811e12c6 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -22,6 +22,7 @@ #define MAXMEM_X86_64_4LEVEL (1ull << 46) const efi_system_table_t *efi_system_table; +const efi_dxe_services_table_t *efi_dxe_table; extern u32 image_offset; static efi_loaded_image_t *image = NULL; @@ -211,9 +212,109 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } +static void +adjust_memory_range_protection(unsigned long start, unsigned long size) +{ + efi_status_t status; + efi_gcd_memory_space_desc_t desc; + unsigned long end, next; + unsigned long rounded_start, rounded_end; + unsigned long unprotect_start, unprotect_size; + + if (efi_dxe_table == NULL) + return; + + rounded_start = rounddown(start, EFI_PAGE_SIZE); + rounded_end = roundup(start + 
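__efi_get_smbios_string() above walks the SMBIOS string-set: consecutive NUL-terminated strings that follow the formatted record, indexed from 1, with an empty string terminating the set. Standalone sketch of the same walk:

    #include <string.h>

    static const char *smbios_string(const unsigned char *record,
                                     int recsize, unsigned char index)
    {
            const char *s = (const char *)record + recsize;

            if (index == 0)
                    return NULL;            /* index 0 means "no string" */

            while (--index) {
                    size_t len = strlen(s);

                    if (!len)
                            return NULL;    /* ran off the end of the set */
                    s += len + 1;
            }
            return s;
    }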
size, EFI_PAGE_SIZE); + + /* + * Don't modify memory region attributes, they are + * already suitable, to lower the possibility to + * encounter firmware bugs. + */ + + for (end = start + size; start < end; start = next) { + + status = efi_dxe_call(get_memory_space_descriptor, start, &desc); + + if (status != EFI_SUCCESS) + return; + + next = desc.base_address + desc.length; + + /* + * Only system memory is suitable for trampoline/kernel image placement, + * so only this type of memory needs its attributes to be modified. + */ + + if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory || + (desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0) + continue; + + unprotect_start = max(rounded_start, (unsigned long)desc.base_address); + unprotect_size = min(rounded_end, next) - unprotect_start; + + status = efi_dxe_call(set_memory_space_attributes, + unprotect_start, unprotect_size, + EFI_MEMORY_WB); + + if (status != EFI_SUCCESS) { + efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %lx\n", + unprotect_start, + unprotect_start + unprotect_size, + status); + } + } +} + +/* + * Trampoline takes 2 pages and can be loaded in first megabyte of memory + * with its end placed between 128k and 640k where BIOS might start. + * (see arch/x86/boot/compressed/pgtable_64.c) + * + * We cannot find exact trampoline placement since memory map + * can be modified by UEFI, and it can alter the computed address. + */ + +#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024) +#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024) + +void startup_32(struct boot_params *boot_params); + +static void +setup_memory_protection(unsigned long image_base, unsigned long image_size) +{ + /* + * Allow execution of possible trampoline used + * for switching between 4- and 5-level page tables + * and relocated kernel image. + */ + + adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE, + TRAMPOLINE_PLACEMENT_SIZE); + +#ifdef CONFIG_64BIT + if (image_base != (unsigned long)startup_32) + adjust_memory_range_protection(image_base, image_size); +#else + /* + * Clear protection flags on a whole range of possible + * addresses used for KASLR. We don't need to do that + * on x86_64, since KASLR/extraction is performed after + * dedicated identity page tables are built and we only + * need to remove possible protection on relocated image + * itself disregarding further relocations. 
+ */ + adjust_memory_range_protection(LOAD_PHYSICAL_ADDR, + KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR); +#endif +} + static const efi_char16_t apple[] = L"Apple"; -static void setup_quirks(struct boot_params *boot_params) +static void setup_quirks(struct boot_params *boot_params, + unsigned long image_base, + unsigned long image_size) { efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long) efi_table_attr(efi_system_table, fw_vendor); @@ -222,6 +323,9 @@ static void setup_quirks(struct boot_params *boot_params) if (IS_ENABLED(CONFIG_APPLE_PROPERTIES)) retrieve_apple_device_properties(boot_params); } + + if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) + setup_memory_protection(image_base, image_size); } /* @@ -341,8 +445,6 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status) asm("hlt"); } -void startup_32(struct boot_params *boot_params); - void __noreturn efi_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg, struct boot_params *boot_params); @@ -414,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; + /* + * Disregard any setup data that was provided by the bootloader: + * setup_data could be pointing anywhere, and we have no way of + * authenticating or validating the payload. + */ + hdr->setup_data = 0; + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ @@ -613,32 +722,22 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, efi_set_u64_split((unsigned long)efi_system_table, &p->efi->efi_systab, &p->efi->efi_systab_hi); - p->efi->efi_memdesc_size = *map->desc_size; - p->efi->efi_memdesc_version = *map->desc_ver; - efi_set_u64_split((unsigned long)*map->map, + p->efi->efi_memdesc_size = map->desc_size; + p->efi->efi_memdesc_version = map->desc_ver; + efi_set_u64_split((unsigned long)map->map, &p->efi->efi_memmap, &p->efi->efi_memmap_hi); - p->efi->efi_memmap_size = *map->map_size; + p->efi->efi_memmap_size = map->map_size; return EFI_SUCCESS; } static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) { - unsigned long map_sz, key, desc_size, buff_size; - efi_memory_desc_t *mem_map; struct setup_data *e820ext = NULL; __u32 e820ext_size = 0; efi_status_t status; - __u32 desc_version; - struct efi_boot_memmap map; struct exit_boot_struct priv; - map.map = &mem_map; - map.map_size = &map_sz; - map.desc_size = &desc_size; - map.desc_ver = &desc_version; - map.key_ptr = &key; - map.buff_size = &buff_size; priv.boot_params = boot_params; priv.efi = &boot_params->efi_info; @@ -647,7 +746,7 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) return status; /* Might as well exit boot services now */ - status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); + status = efi_exit_boot_services(handle, &priv, exit_boot_func); if (status != EFI_SUCCESS) return status; @@ -666,21 +765,28 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) * relocated by efi_relocate_kernel. * On failure, we exit to the firmware via efi_exit instead of returning. 
*/ -unsigned long efi_main(efi_handle_t handle, - efi_system_table_t *sys_table_arg, - struct boot_params *boot_params) +asmlinkage unsigned long efi_main(efi_handle_t handle, + efi_system_table_t *sys_table_arg, + struct boot_params *boot_params) { unsigned long bzimage_addr = (unsigned long)startup_32; unsigned long buffer_start, buffer_end; struct setup_header *hdr = &boot_params->hdr; + const struct linux_efi_initrd *initrd = NULL; efi_status_t status; efi_system_table = sys_table_arg; - /* Check if we were booted by the EFI firmware */ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) efi_exit(handle, EFI_INVALID_PARAMETER); + efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID); + if (efi_dxe_table && + efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) { + efi_warn("Ignoring DXE services table: invalid signature\n"); + efi_dxe_table = NULL; + } + /* * If the kernel isn't already loaded at a suitable address, * relocate it. @@ -761,24 +867,18 @@ unsigned long efi_main(efi_handle_t handle, * arguments will be processed only if image is not NULL, which will be * the case only if we were loaded via the PE entry point. */ - if (!efi_noinitrd) { - unsigned long addr, size; - - status = efi_load_initrd(image, &addr, &size, - hdr->initrd_addr_max, ULONG_MAX); - - if (status != EFI_SUCCESS) { - efi_err("Failed to load initrd!\n"); - goto fail; - } - if (size > 0) { - efi_set_u64_split(addr, &hdr->ramdisk_image, - &boot_params->ext_ramdisk_image); - efi_set_u64_split(size, &hdr->ramdisk_size, - &boot_params->ext_ramdisk_size); - } + status = efi_load_initrd(image, hdr->initrd_addr_max, ULONG_MAX, + &initrd); + if (status != EFI_SUCCESS) + goto fail; + if (initrd && initrd->size > 0) { + efi_set_u64_split(initrd->base, &hdr->ramdisk_image, + &boot_params->ext_ramdisk_image); + efi_set_u64_split(initrd->size, &hdr->ramdisk_size, + &boot_params->ext_ramdisk_size); } + /* * If the boot loader gave us a value for secure_boot then we use that, * otherwise we ask the BIOS. 
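For orientation, the initrd hunk above replaces efi_load_initrd()'s separate address/size out-parameters with a single descriptor table. A minimal sketch of the consuming side follows; struct linux_efi_initrd's base/size layout is inferred from the ->base/->size accesses above, and example_record_initrd() is a hypothetical helper, not part of this patch:

struct linux_efi_initrd {
	unsigned long	base;
	unsigned long	size;
};

/* Hypothetical helper mirroring the hunk above: publish a loaded initrd
 * in both the legacy 32-bit boot_params fields and their extended
 * upper halves. */
static void example_record_initrd(struct boot_params *boot_params,
				  const struct linux_efi_initrd *initrd)
{
	struct setup_header *hdr = &boot_params->hdr;

	efi_set_u64_split(initrd->base, &hdr->ramdisk_image,
			  &boot_params->ext_ramdisk_image);
	efi_set_u64_split(initrd->size, &hdr->ramdisk_size,
			  &boot_params->ext_ramdisk_size);
}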
@@ -797,7 +897,7 @@ unsigned long efi_main(efi_handle_t handle, setup_efi_pci(boot_params); - setup_quirks(boot_params); + setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start); status = exit_boot(boot_params, handle); if (status != EFI_SUCCESS) { diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S new file mode 100644 index 000000000000..9e6fe061ab07 --- /dev/null +++ b/drivers/firmware/efi/libstub/zboot-header.S @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/pe.h> + +#ifdef CONFIG_64BIT + .set .Lextra_characteristics, 0x0 + .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS +#else + .set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE + .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32 +#endif + + .section ".head", "a" + .globl __efistub_efi_zboot_header +__efistub_efi_zboot_header: +.Ldoshdr: + .long MZ_MAGIC + .ascii "zimg" // image type + .long __efistub__gzdata_start - .Ldoshdr // payload offset + .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size + .long 0, 0 // reserved + .asciz COMP_TYPE // compression type + .org .Ldoshdr + 0x3c + .long .Lpehdr - .Ldoshdr // PE header offset + +.Lpehdr: + .long PE_MAGIC + .short MACHINE_TYPE + .short .Lsection_count + .long 0 + .long 0 + .long 0 + .short .Lsection_table - .Loptional_header + .short IMAGE_FILE_DEBUG_STRIPPED | \ + IMAGE_FILE_EXECUTABLE_IMAGE | \ + IMAGE_FILE_LINE_NUMS_STRIPPED |\ + .Lextra_characteristics + +.Loptional_header: + .short .Lpe_opt_magic + .byte 0, 0 + .long _etext - .Lefi_header_end + .long __data_size + .long 0 + .long __efistub_efi_zboot_entry - .Ldoshdr + .long .Lefi_header_end - .Ldoshdr + +#ifdef CONFIG_64BIT + .quad 0 +#else + .long _etext - .Ldoshdr, 0x0 +#endif + .long 4096 + .long 512 + .short 0, 0 + .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion + .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion + .short 0, 0 + .long 0 + .long _end - .Ldoshdr + + .long .Lefi_header_end - .Ldoshdr + .long 0 + .short IMAGE_SUBSYSTEM_EFI_APPLICATION + .short 0 +#ifdef CONFIG_64BIT + .quad 0, 0, 0, 0 +#else + .long 0, 0, 0, 0 +#endif + .long 0 + .long (.Lsection_table - .) / 8 + + .quad 0 // ExportTable + .quad 0 // ImportTable + .quad 0 // ResourceTable + .quad 0 // ExceptionTable + .quad 0 // CertificationTable + .quad 0 // BaseRelocationTable +#ifdef CONFIG_DEBUG_EFI + .long .Lefi_debug_table - .Ldoshdr // DebugTable + .long .Lefi_debug_table_size +#endif + +.Lsection_table: + .ascii ".text\0\0\0" + .long _etext - .Lefi_header_end + .long .Lefi_header_end - .Ldoshdr + .long _etext - .Lefi_header_end + .long .Lefi_header_end - .Ldoshdr + + .long 0, 0 + .short 0, 0 + .long IMAGE_SCN_CNT_CODE | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_EXECUTE + + .ascii ".data\0\0\0" + .long __data_size + .long _etext - .Ldoshdr + .long __data_rawsize + .long _etext - .Ldoshdr + + .long 0, 0 + .short 0, 0 + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_WRITE + + .set .Lsection_count, (. - .Lsection_table) / 40 + +#ifdef CONFIG_DEBUG_EFI + .section ".rodata", "a" + .align 2 +.Lefi_debug_table: + // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY + .long 0 // Characteristics + .long 0 // TimeDateStamp + .short 0 // MajorVersion + .short 0 // MinorVersion + .long IMAGE_DEBUG_TYPE_CODEVIEW // Type + .long .Lefi_debug_entry_size // SizeOfData + .long 0 // RVA + .long .Lefi_debug_entry - .Ldoshdr // FileOffset + + .set .Lefi_debug_table_size, . 
- .Lefi_debug_table + .previous + +.Lefi_debug_entry: + // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY + .ascii "NB10" // Signature + .long 0 // Unknown + .long 0 // Unknown2 + .long 0 // Unknown3 + + .asciz ZBOOT_EFI_PATH + + .set .Lefi_debug_entry_size, . - .Lefi_debug_entry +#endif + + .p2align 12 +.Lefi_header_end: + diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c new file mode 100644 index 000000000000..ea72c8f27da6 --- /dev/null +++ b/drivers/firmware/efi/libstub/zboot.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi.h> +#include <linux/pe.h> +#include <asm/efi.h> +#include <asm/unaligned.h> + +#include "efistub.h" + +static unsigned char zboot_heap[SZ_256K] __aligned(64); +static unsigned long free_mem_ptr, free_mem_end_ptr; + +#define STATIC static +#if defined(CONFIG_KERNEL_GZIP) +#include "../../../../lib/decompress_inflate.c" +#elif defined(CONFIG_KERNEL_LZ4) +#include "../../../../lib/decompress_unlz4.c" +#elif defined(CONFIG_KERNEL_LZMA) +#include "../../../../lib/decompress_unlzma.c" +#elif defined(CONFIG_KERNEL_LZO) +#include "../../../../lib/decompress_unlzo.c" +#elif defined(CONFIG_KERNEL_XZ) +#undef memcpy +#define memcpy memcpy +#undef memmove +#define memmove memmove +#include "../../../../lib/decompress_unxz.c" +#elif defined(CONFIG_KERNEL_ZSTD) +#include "../../../../lib/decompress_unzstd.c" +#endif + +extern char efi_zboot_header[]; +extern char _gzdata_start[], _gzdata_end[]; + +static void log(efi_char16_t str[]) +{ + efi_call_proto(efi_table_attr(efi_system_table, con_out), + output_string, L"EFI decompressor: "); + efi_call_proto(efi_table_attr(efi_system_table, con_out), + output_string, str); + efi_call_proto(efi_table_attr(efi_system_table, con_out), + output_string, L"\n"); +} + +static void error(char *x) +{ + log(L"error() called from decompressor library\n"); +} + +// Local version to avoid pulling in memcmp() +static bool guids_eq(const efi_guid_t *a, const efi_guid_t *b) +{ + const u32 *l = (u32 *)a; + const u32 *r = (u32 *)b; + + return l[0] == r[0] && l[1] == r[1] && l[2] == r[2] && l[3] == r[3]; +} + +static efi_status_t __efiapi +load_file(efi_load_file_protocol_t *this, efi_device_path_protocol_t *rem, + bool boot_policy, unsigned long *bufsize, void *buffer) +{ + unsigned long compressed_size = _gzdata_end - _gzdata_start; + struct efi_vendor_dev_path *vendor_dp; + bool decompress = false; + unsigned long size; + int ret; + + if (rem == NULL || bufsize == NULL) + return EFI_INVALID_PARAMETER; + + if (boot_policy) + return EFI_UNSUPPORTED; + + // Look for our vendor media device node in the remaining file path + if (rem->type == EFI_DEV_MEDIA && + rem->sub_type == EFI_DEV_MEDIA_VENDOR) { + vendor_dp = container_of(rem, struct efi_vendor_dev_path, header); + if (!guids_eq(&vendor_dp->vendorguid, &LINUX_EFI_ZBOOT_MEDIA_GUID)) + return EFI_NOT_FOUND; + + decompress = true; + rem = (void *)(vendor_dp + 1); + } + + if (rem->type != EFI_DEV_END_PATH || + rem->sub_type != EFI_DEV_END_ENTIRE) + return EFI_NOT_FOUND; + + // The uncompressed size of the payload is appended to the raw bit + // stream, and may therefore appear misaligned in memory + size = decompress ? 
get_unaligned_le32(_gzdata_end - 4) + : compressed_size; + if (buffer == NULL || *bufsize < size) { + *bufsize = size; + return EFI_BUFFER_TOO_SMALL; + } + + if (decompress) { + ret = __decompress(_gzdata_start, compressed_size, NULL, NULL, + buffer, size, NULL, error); + if (ret < 0) { + log(L"Decompression failed"); + return EFI_DEVICE_ERROR; + } + } else { + memcpy(buffer, _gzdata_start, compressed_size); + } + + return EFI_SUCCESS; +} + +// Return the length in bytes of the device path up to the first end node. +static int device_path_length(const efi_device_path_protocol_t *dp) +{ + int len = 0; + + while (dp->type != EFI_DEV_END_PATH) { + len += dp->length; + dp = (void *)((u8 *)dp + dp->length); + } + return len; +} + +static void append_rel_offset_node(efi_device_path_protocol_t **dp, + unsigned long start, unsigned long end) +{ + struct efi_rel_offset_dev_path *rodp = (void *)*dp; + + rodp->header.type = EFI_DEV_MEDIA; + rodp->header.sub_type = EFI_DEV_MEDIA_REL_OFFSET; + rodp->header.length = sizeof(struct efi_rel_offset_dev_path); + rodp->reserved = 0; + rodp->starting_offset = start; + rodp->ending_offset = end; + + *dp = (void *)(rodp + 1); +} + +static void append_ven_media_node(efi_device_path_protocol_t **dp, + efi_guid_t *guid) +{ + struct efi_vendor_dev_path *vmdp = (void *)*dp; + + vmdp->header.type = EFI_DEV_MEDIA; + vmdp->header.sub_type = EFI_DEV_MEDIA_VENDOR; + vmdp->header.length = sizeof(struct efi_vendor_dev_path); + vmdp->vendorguid = *guid; + + *dp = (void *)(vmdp + 1); +} + +static void append_end_node(efi_device_path_protocol_t **dp) +{ + (*dp)->type = EFI_DEV_END_PATH; + (*dp)->sub_type = EFI_DEV_END_ENTIRE; + (*dp)->length = sizeof(struct efi_generic_dev_path); + + ++*dp; +} + +asmlinkage efi_status_t __efiapi +efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) +{ + struct efi_mem_mapped_dev_path mmdp = { + .header.type = EFI_DEV_HW, + .header.sub_type = EFI_DEV_MEM_MAPPED, + .header.length = sizeof(struct efi_mem_mapped_dev_path) + }; + efi_device_path_protocol_t *parent_dp, *dpp, *lf2_dp, *li_dp; + efi_load_file2_protocol_t zboot_load_file2; + efi_loaded_image_t *parent, *child; + unsigned long exit_data_size; + efi_handle_t child_handle; + efi_handle_t zboot_handle; + efi_char16_t *exit_data; + efi_status_t status; + void *dp_alloc; + int dp_len; + + WRITE_ONCE(efi_system_table, systab); + + free_mem_ptr = (unsigned long)&zboot_heap; + free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap); + + exit_data = NULL; + exit_data_size = 0; + + status = efi_bs_call(handle_protocol, handle, + &LOADED_IMAGE_PROTOCOL_GUID, (void **)&parent); + if (status != EFI_SUCCESS) { + log(L"Failed to locate parent's loaded image protocol"); + return status; + } + + status = efi_bs_call(handle_protocol, handle, + &LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID, + (void **)&parent_dp); + if (status != EFI_SUCCESS || parent_dp == NULL) { + // Create a MemoryMapped() device path node to describe + // the parent image if no device path was provided. 
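/*
 * Editorial sketch, not part of the patch: the two device paths that
 * efi_zboot_entry() constructs below, in informal notation:
 *
 *   lf2_dp: <parent_dp>/Offset(start of _gzdata, end of _gzdata - 1)/End
 *   li_dp:  <parent_dp>/Offset(...)/VenMedia(LINUX_EFI_ZBOOT_MEDIA_GUID)/End
 *
 * load_image is later invoked on li_dp; when the firmware calls back
 * into load_file() through the LoadFile2 protocol installed on
 * zboot_handle, the VenMedia node remaining in the path is what makes
 * load_file() serve the decompressed kernel rather than the raw
 * compressed payload.
 */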
+ mmdp.memory_type = parent->image_code_type; + mmdp.starting_addr = (unsigned long)parent->image_base; + mmdp.ending_addr = (unsigned long)parent->image_base + + parent->image_size - 1; + parent_dp = &mmdp.header; + dp_len = sizeof(mmdp); + } else { + dp_len = device_path_length(parent_dp); + } + + // Allocate some pool memory for device path protocol data + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + 2 * (dp_len + sizeof(struct efi_rel_offset_dev_path) + + sizeof(struct efi_generic_dev_path)) + + sizeof(struct efi_vendor_dev_path), + (void **)&dp_alloc); + if (status != EFI_SUCCESS) { + log(L"Failed to allocate device path pool memory"); + return status; + } + + // Create a device path describing the compressed payload in this image + // <...parent_dp...>/Offset(<start>, <end>) + lf2_dp = memcpy(dp_alloc, parent_dp, dp_len); + dpp = (void *)((u8 *)lf2_dp + dp_len); + append_rel_offset_node(&dpp, + (unsigned long)(_gzdata_start - efi_zboot_header), + (unsigned long)(_gzdata_end - efi_zboot_header - 1)); + append_end_node(&dpp); + + // Create a device path describing the decompressed payload in this image + // <...parent_dp...>/Offset(<start>, <end>)/VenMedia(ZBOOT_MEDIA_GUID) + dp_len += sizeof(struct efi_rel_offset_dev_path); + li_dp = memcpy(dpp, lf2_dp, dp_len); + dpp = (void *)((u8 *)li_dp + dp_len); + append_ven_media_node(&dpp, &LINUX_EFI_ZBOOT_MEDIA_GUID); + append_end_node(&dpp); + + zboot_handle = NULL; + zboot_load_file2.load_file = load_file; + status = efi_bs_call(install_multiple_protocol_interfaces, + &zboot_handle, + &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp, + &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2, + NULL); + if (status != EFI_SUCCESS) { + log(L"Failed to install LoadFile2 protocol and device path"); + goto free_dpalloc; + } + + status = efi_bs_call(load_image, false, handle, li_dp, NULL, 0, + &child_handle); + if (status != EFI_SUCCESS) { + log(L"Failed to load image"); + goto uninstall_lf2; + } + + status = efi_bs_call(handle_protocol, child_handle, + &LOADED_IMAGE_PROTOCOL_GUID, (void **)&child); + if (status != EFI_SUCCESS) { + log(L"Failed to locate child's loaded image protocol"); + goto unload_image; + } + + // Copy the kernel command line + child->load_options = parent->load_options; + child->load_options_size = parent->load_options_size; + + status = efi_bs_call(start_image, child_handle, &exit_data_size, + &exit_data); + if (status != EFI_SUCCESS) { + log(L"StartImage() returned with error"); + if (exit_data_size > 0) + log(exit_data); + + // If StartImage() returns EFI_SECURITY_VIOLATION, the image is + // not unloaded so we need to do it by hand. + if (status == EFI_SECURITY_VIOLATION) +unload_image: + efi_bs_call(unload_image, child_handle); + } + +uninstall_lf2: + efi_bs_call(uninstall_multiple_protocol_interfaces, + zboot_handle, + &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp, + &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2, + NULL); + +free_dpalloc: + efi_bs_call(free_pool, dp_alloc); + + efi_bs_call(exit, handle, status, exit_data_size, exit_data); + + // Free ExitData in case Exit() returned with a failure code, + // but return the original status code. 
+ log(L"Exit() returned with failure code"); + if (exit_data != NULL) + efi_bs_call(free_pool, exit_data); + return status; +} diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds new file mode 100644 index 000000000000..93d33f68333b --- /dev/null +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +ENTRY(__efistub_efi_zboot_header); + +SECTIONS +{ + .head : ALIGN(4096) { + *(.head) + } + + .text : { + *(.text* .init.text*) + } + + .rodata : ALIGN(8) { + __efistub__gzdata_start = .; + *(.gzdata) + __efistub__gzdata_end = .; + *(.rodata* .init.rodata* .srodata*) + _etext = ALIGN(4096); + . = _etext; + } + + .data : ALIGN(4096) { + *(.data* .init.data*) + _edata = ALIGN(512); + . = _edata; + } + + .bss : { + *(.bss* .init.bss*) + _end = ALIGN(512); + . = _end; + } + + /DISCARD/ : { + *(.modinfo .init.modinfo) + } +} + +PROVIDE(__efistub__gzdata_size = + ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start)); + +PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); +PROVIDE(__data_size = ABSOLUTE(_end - _etext)); diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 4df55a55da84..6ec7970dbd40 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -59,8 +59,7 @@ static void __init efi_memmap_free(void) * Depending on whether mm_init() has already been invoked or not, * either memblock or "normal" page allocation is used. * - * Returns the physical address of the allocated memory map on - * success, zero on failure. + * Returns zero on success, a negative error code on failure. */ int __init efi_memmap_alloc(unsigned int num_entries, struct efi_memory_map_data *data) @@ -245,7 +244,7 @@ int __init efi_memmap_install(struct efi_memory_map_data *data) * @range: Address range (start, end) to split around * * Returns the number of additional EFI memmap entries required to - * accomodate @range. + * accommodate @range. */ int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range) { diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index 38722d2009e2..5ed0602c2f75 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -359,4 +359,4 @@ static int __init efi_mokvar_sysfs_init(void) } return err; } -device_initcall(efi_mokvar_sysfs_init); +fs_initcall(efi_mokvar_sysfs_init); diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c index 73089a24f04b..ceae84c19d22 100644 --- a/drivers/firmware/efi/reboot.c +++ b/drivers/firmware/efi/reboot.c @@ -6,7 +6,7 @@ #include <linux/efi.h> #include <linux/reboot.h> -static void (*orig_pm_power_off)(void); +static struct sys_off_handler *efi_sys_off_handler; int efi_reboot_quirk_mode = -1; @@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void) return false; } -static void efi_power_off(void) +static int efi_power_off(struct sys_off_data *data) { efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); - /* - * The above call should not return, if it does fall back to - * the original power off method (typically ACPI poweroff). 
- */ - if (orig_pm_power_off) - orig_pm_power_off(); + + return NOTIFY_DONE; } static int __init efi_shutdown_init(void) @@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void) return -ENODEV; if (efi_poweroff_required()) { - orig_pm_power_off = pm_power_off; - pm_power_off = efi_power_off; + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + efi_sys_off_handler = + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE + 1, + efi_power_off, NULL); + if (IS_ERR(efi_sys_off_handler)) + return PTR_ERR(efi_sys_off_handler); } return 0; diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index d28e715d2bcc..d0daacd2c903 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -41,7 +41,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index ad9ddefc9dcb..92a3d45a795c 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -79,6 +79,7 @@ static struct attribute *def_attrs[] = { &map_attribute_attr.attr, NULL }; +ATTRIBUTE_GROUPS(def); static const struct sysfs_ops map_attr_ops = { .show = map_attr_show, @@ -94,7 +95,7 @@ static void map_release(struct kobject *kobj) static struct kobj_type __refdata map_ktype = { .sysfs_ops = &map_attr_ops, - .default_attrs = def_attrs, + .default_groups = def_groups, .release = map_release, }; diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index 4c7c9dd7733f..7882d4b3f2be 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -26,8 +26,6 @@ #include <linux/sysfb.h> #include <video/vga.h> -#include <asm/efi.h> - enum { OVERRIDE_NONE = 0x0, OVERRIDE_BASE = 0x1, diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 8f665678e9e3..e8d69bd548f3 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void) goto out_calc; } - memblock_reserve((unsigned long)final_tbl, + memblock_reserve(efi.tpm_final_log, tbl_size + sizeof(*final_tbl)); efi_tpm_final_log_size = tbl_size; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a3963..0ba9f18312f5 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -6,1214 +6,238 @@ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com> */ -#include <linux/capability.h> #include <linux/types.h> +#include <linux/sizes.h> #include <linux/errno.h> #include <linux/init.h> -#include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> #include <linux/smp.h> #include <linux/efi.h> -#include <linux/sysfs.h> -#include <linux/device.h> -#include <linux/slab.h> -#include <linux/ctype.h> #include <linux/ucs2_string.h> /* Private pointer to registered efivars */ static struct efivars *__efivars; -/* - * efivars_lock protects three things: - * 1) efivarfs_list and efivars_sysfs_list - * 2) ->ops calls - * 3) (un)registration of __efivars - */ static DEFINE_SEMAPHORE(efivars_lock); -static bool -validate_device_path(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - struct efi_generic_dev_path *node; - int offset = 0; - - node = (struct efi_generic_dev_path *)buffer; - - if (len < sizeof(*node)) - 
return false; - - while (offset <= len - sizeof(*node) && - node->length >= sizeof(*node) && - node->length <= len - offset) { - offset += node->length; - - if ((node->type == EFI_DEV_END_PATH || - node->type == EFI_DEV_END_PATH2) && - node->sub_type == EFI_DEV_END_ENTIRE) - return true; - - node = (struct efi_generic_dev_path *)(buffer + offset); - } - - /* - * If we're here then either node->length pointed past the end - * of the buffer or we reached the end of the buffer without - * finding a device path end node. - */ - return false; -} - -static bool -validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - /* An array of 16-bit integers */ - if ((len % 2) != 0) - return false; - - return true; -} - -static bool -validate_load_option(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - u16 filepathlength; - int i, desclength = 0, namelen; - - namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN); - - /* Either "Boot" or "Driver" followed by four digits of hex */ - for (i = match; i < match+4; i++) { - if (var_name[i] > 127 || - hex_to_bin(var_name[i] & 0xff) < 0) - return true; - } - - /* Reject it if there's 4 digits of hex and then further content */ - if (namelen > match + 4) - return false; - - /* A valid entry must be at least 8 bytes */ - if (len < 8) - return false; - - filepathlength = buffer[4] | buffer[5] << 8; - - /* - * There's no stored length for the description, so it has to be - * found by hand - */ - desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; - - /* Each boot entry must have a descriptor */ - if (!desclength) - return false; - - /* - * If the sum of the length of the description, the claimed filepath - * length and the original header are greater than the length of the - * variable, it's malformed - */ - if ((desclength + filepathlength + 6) > len) - return false; - - /* - * And, finally, check the filepath - */ - return validate_device_path(var_name, match, buffer + desclength + 6, - filepathlength); -} - -static bool -validate_uint16(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - /* A single 16-bit integer */ - if (len != 2) - return false; - - return true; -} - -static bool -validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer, - unsigned long len) -{ - int i; - - for (i = 0; i < len; i++) { - if (buffer[i] > 127) - return false; - - if (buffer[i] == 0) - return true; - } - - return false; -} - -struct variable_validate { - efi_guid_t vendor; - char *name; - bool (*validate)(efi_char16_t *var_name, int match, u8 *data, - unsigned long len); -}; - -/* - * This is the list of variables we need to validate, as well as the - * whitelist for what we think is safe not to default to immutable. - * - * If it has a validate() method that's not NULL, it'll go into the - * validation routine. If not, it is assumed valid, but still used for - * whitelisting. - * - * Note that it's sorted by {vendor,name}, but globbed names must come after - * any other name with the same prefix. 
- */ -static const struct variable_validate variable_validate[] = { - { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 }, - { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order }, - { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option }, - { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order }, - { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option }, - { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path }, - { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string }, - { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL }, - { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string }, - { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 }, - { LINUX_EFI_CRASH_GUID, "*", NULL }, - { NULL_GUID, "", NULL }, -}; - -/* - * Check if @var_name matches the pattern given in @match_name. - * - * @var_name: an array of @len non-NUL characters. - * @match_name: a NUL-terminated pattern string, optionally ending in "*". A - * final "*" character matches any trailing characters @var_name, - * including the case when there are none left in @var_name. - * @match: on output, the number of non-wildcard characters in @match_name - * that @var_name matches, regardless of the return value. - * @return: whether @var_name fully matches @match_name. - */ -static bool -variable_matches(const char *var_name, size_t len, const char *match_name, - int *match) -{ - for (*match = 0; ; (*match)++) { - char c = match_name[*match]; - - switch (c) { - case '*': - /* Wildcard in @match_name means we've matched. */ - return true; - - case '\0': - /* @match_name has ended. Has @var_name too? */ - return (*match == len); - - default: - /* - * We've reached a non-wildcard char in @match_name. - * Continue only if there's an identical character in - * @var_name. 
- */ - if (*match < len && c == var_name[*match]) - continue; - return false; - } - } -} - -bool -efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, - unsigned long data_size) -{ - int i; - unsigned long utf8_size; - u8 *utf8_name; - - utf8_size = ucs2_utf8size(var_name); - utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL); - if (!utf8_name) - return false; - - ucs2_as_utf8(utf8_name, var_name, utf8_size); - utf8_name[utf8_size] = '\0'; - - for (i = 0; variable_validate[i].name[0] != '\0'; i++) { - const char *name = variable_validate[i].name; - int match = 0; - - if (efi_guidcmp(vendor, variable_validate[i].vendor)) - continue; - - if (variable_matches(utf8_name, utf8_size+1, name, &match)) { - if (variable_validate[i].validate == NULL) - break; - kfree(utf8_name); - return variable_validate[i].validate(var_name, match, - data, data_size); - } - } - kfree(utf8_name); - return true; -} -EXPORT_SYMBOL_GPL(efivar_validate); - -bool -efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, - size_t len) -{ - int i; - bool found = false; - int match = 0; - - /* - * Check if our variable is in the validated variables list - */ - for (i = 0; variable_validate[i].name[0] != '\0'; i++) { - if (efi_guidcmp(variable_validate[i].vendor, vendor)) - continue; - - if (variable_matches(var_name, len, - variable_validate[i].name, &match)) { - found = true; - break; - } - } - - /* - * If it's in our list, it is removable. - */ - return found; -} -EXPORT_SYMBOL_GPL(efivar_variable_is_removable); - -static efi_status_t -check_var_size(u32 attributes, unsigned long size) +static efi_status_t check_var_size(bool nonblocking, u32 attributes, + unsigned long size) { const struct efivar_operations *fops; - - if (!__efivars) - return EFI_UNSUPPORTED; - - fops = __efivars->ops; - - if (!fops->query_variable_store) - return EFI_UNSUPPORTED; - - return fops->query_variable_store(attributes, size, false); -} - -static efi_status_t -check_var_size_nonblocking(u32 attributes, unsigned long size) -{ - const struct efivar_operations *fops; - - if (!__efivars) - return EFI_UNSUPPORTED; + efi_status_t status; fops = __efivars->ops; if (!fops->query_variable_store) - return EFI_UNSUPPORTED; - - return fops->query_variable_store(attributes, size, true); -} - -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, - struct list_head *head) -{ - struct efivar_entry *entry, *n; - unsigned long strsize1, strsize2; - bool found = false; - - strsize1 = ucs2_strsize(variable_name, 1024); - list_for_each_entry_safe(entry, n, head, list) { - strsize2 = ucs2_strsize(entry->var.VariableName, 1024); - if (strsize1 == strsize2 && - !memcmp(variable_name, &(entry->var.VariableName), - strsize2) && - !efi_guidcmp(entry->var.VendorGuid, - *vendor)) { - found = true; - break; - } - } - return found; -} - -/* - * Returns the size of variable_name, in bytes, including the - * terminating NULL character, or variable_name_size if no NULL - * character is found among the first variable_name_size bytes. - */ -static unsigned long var_name_strnsize(efi_char16_t *variable_name, - unsigned long variable_name_size) -{ - unsigned long len; - efi_char16_t c; - - /* - * The variable name is, by definition, a NULL-terminated - * string, so make absolutely sure that variable_name_size is - * the value we expect it to be. If not, return the real size. 
- */ - for (len = 2; len <= variable_name_size; len += sizeof(c)) { - c = variable_name[(len / sizeof(c)) - 1]; - if (!c) - break; - } - - return min(len, variable_name_size); -} - -/* - * Print a warning when duplicate EFI variables are encountered and - * disable the sysfs workqueue since the firmware is buggy. - */ -static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, - unsigned long len16) -{ - size_t i, len8 = len16 / sizeof(efi_char16_t); - char *str8; - - str8 = kzalloc(len8, GFP_KERNEL); - if (!str8) - return; - - for (i = 0; i < len8; i++) - str8[i] = str16[i]; - - printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", - str8, vendor_guid); - kfree(str8); + status = EFI_UNSUPPORTED; + else + status = fops->query_variable_store(attributes, size, + nonblocking); + if (status == EFI_UNSUPPORTED) + return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES; + return status; } /** - * efivar_init - build the initial list of EFI variables - * @func: callback function to invoke for every variable - * @data: function-specific data to pass to @func - * @duplicates: error if we encounter duplicates on @head? - * @head: initialised head of variable list - * - * Get every EFI variable from the firmware and invoke @func. @func - * should call efivar_entry_add() to build the list of variables. + * efivars_kobject - get the kobject for the registered efivars * - * Returns 0 on success, or a kernel error code on failure. + * If efivars_register() has not been called we return NULL, + * otherwise return the kobject used at registration time. */ -int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), - void *data, bool duplicates, struct list_head *head) +struct kobject *efivars_kobject(void) { - const struct efivar_operations *ops; - unsigned long variable_name_size = 1024; - efi_char16_t *variable_name; - efi_status_t status; - efi_guid_t vendor_guid; - int err = 0; - if (!__efivars) - return -EFAULT; - - ops = __efivars->ops; - - variable_name = kzalloc(variable_name_size, GFP_KERNEL); - if (!variable_name) { - printk(KERN_ERR "efivars: Memory allocation failed.\n"); - return -ENOMEM; - } - - if (down_interruptible(&efivars_lock)) { - err = -EINTR; - goto free; - } - - /* - * Per EFI spec, the maximum storage allocated for both - * the variable name and variable data is 1024 bytes. - */ - - do { - variable_name_size = 1024; - - status = ops->get_next_variable(&variable_name_size, - variable_name, - &vendor_guid); - switch (status) { - case EFI_SUCCESS: - if (duplicates) - up(&efivars_lock); - - variable_name_size = var_name_strnsize(variable_name, - variable_name_size); - - /* - * Some firmware implementations return the - * same variable name on multiple calls to - * get_next_variable(). Terminate the loop - * immediately as there is no guarantee that - * we'll ever see a different variable name, - * and may end up looping here forever. 
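Restated for clarity (an editorial sketch, not patch code): the consolidated check_var_size() above folds the old blocking/non-blocking pair into one helper and degrades gracefully when the backend lacks query_variable_store(), with SZ_64K serving as a conservative per-variable upper bound when the firmware cannot report its storage budget:

/* qvs_status is what query_variable_store() returned, or
 * EFI_UNSUPPORTED when the backend does not implement it. */
static efi_status_t example_check_var_size(efi_status_t qvs_status,
					   unsigned long size)
{
	/* No usable storage query: optimistically accept writes of up
	 * to 64 KiB instead of failing them outright. */
	if (qvs_status == EFI_UNSUPPORTED)
		return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
	return qvs_status;
}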
- */ - if (duplicates && - variable_is_present(variable_name, &vendor_guid, - head)) { - dup_variable_bug(variable_name, &vendor_guid, - variable_name_size); - status = EFI_NOT_FOUND; - } else { - err = func(variable_name, vendor_guid, - variable_name_size, data); - if (err) - status = EFI_NOT_FOUND; - } - - if (duplicates) { - if (down_interruptible(&efivars_lock)) { - err = -EINTR; - goto free; - } - } - - break; - case EFI_UNSUPPORTED: - err = -EOPNOTSUPP; - status = EFI_NOT_FOUND; - break; - case EFI_NOT_FOUND: - break; - default: - printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n", - status); - status = EFI_NOT_FOUND; - break; - } - - } while (status != EFI_NOT_FOUND); - - up(&efivars_lock); -free: - kfree(variable_name); + return NULL; - return err; + return __efivars->kobject; } -EXPORT_SYMBOL_GPL(efivar_init); +EXPORT_SYMBOL_GPL(efivars_kobject); /** - * efivar_entry_add - add entry to variable list - * @entry: entry to add to list - * @head: list head + * efivars_register - register an efivars + * @efivars: efivars to register + * @ops: efivars operations + * @kobject: @efivars-specific kobject * - * Returns 0 on success, or a kernel error code on failure. + * Only a single efivars can be registered at any time. */ -int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) +int efivars_register(struct efivars *efivars, + const struct efivar_operations *ops, + struct kobject *kobject) { if (down_interruptible(&efivars_lock)) return -EINTR; - list_add(&entry->list, head); - up(&efivars_lock); - return 0; -} -EXPORT_SYMBOL_GPL(efivar_entry_add); + efivars->ops = ops; + efivars->kobject = kobject; -/** - * efivar_entry_remove - remove entry from variable list - * @entry: entry to remove from list - * - * Returns 0 on success, or a kernel error code on failure. - */ -int efivar_entry_remove(struct efivar_entry *entry) -{ - if (down_interruptible(&efivars_lock)) - return -EINTR; - list_del(&entry->list); - up(&efivars_lock); + __efivars = efivars; - return 0; -} -EXPORT_SYMBOL_GPL(efivar_entry_remove); + pr_info("Registered efivars operations\n"); -/* - * efivar_entry_list_del_unlock - remove entry from variable list - * @entry: entry to remove - * - * Remove @entry from the variable list and release the list lock. - * - * NOTE: slightly weird locking semantics here - we expect to be - * called with the efivars lock already held, and we release it before - * returning. This is because this function is usually called after - * set_variable() while the lock is still held. - */ -static void efivar_entry_list_del_unlock(struct efivar_entry *entry) -{ - list_del(&entry->list); up(&efivars_lock); -} - -/** - * __efivar_entry_delete - delete an EFI variable - * @entry: entry containing EFI variable to delete - * - * Delete the variable from the firmware but leave @entry on the - * variable list. - * - * This function differs from efivar_entry_delete() because it does - * not remove @entry from the variable list. Also, it is safe to be - * called from within a efivar_entry_iter_begin() and - * efivar_entry_iter_end() region, unlike efivar_entry_delete(). - * - * Returns 0 on success, or a converted EFI status code if - * set_variable() fails. 
- */ -int __efivar_entry_delete(struct efivar_entry *entry) -{ - efi_status_t status; - - if (!__efivars) - return -EINVAL; - - status = __efivars->ops->set_variable(entry->var.VariableName, - &entry->var.VendorGuid, - 0, 0, NULL); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(__efivar_entry_delete); - -/** - * efivar_entry_delete - delete variable and remove entry from list - * @entry: entry containing variable to delete - * - * Delete the variable from the firmware and remove @entry from the - * variable list. It is the caller's responsibility to free @entry - * once we return. - * - * Returns 0 on success, -EINTR if we can't grab the semaphore, - * converted EFI status code if set_variable() fails. - */ -int efivar_entry_delete(struct efivar_entry *entry) -{ - const struct efivar_operations *ops; - efi_status_t status; - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - ops = __efivars->ops; - status = ops->set_variable(entry->var.VariableName, - &entry->var.VendorGuid, - 0, 0, NULL); - if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) { - up(&efivars_lock); - return efi_status_to_err(status); - } - - efivar_entry_list_del_unlock(entry); return 0; } -EXPORT_SYMBOL_GPL(efivar_entry_delete); +EXPORT_SYMBOL_GPL(efivars_register); /** - * efivar_entry_set - call set_variable() - * @entry: entry containing the EFI variable to write - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer containing variable data - * @head: head of variable list - * - * Calls set_variable() for an EFI variable. If creating a new EFI - * variable, this function is usually followed by efivar_entry_add(). - * - * Before writing the variable, the remaining EFI variable storage - * space is checked to ensure there is enough room available. - * - * If @head is not NULL a lookup is performed to determine whether - * the entry is already on the list. + * efivars_unregister - unregister an efivars + * @efivars: efivars to unregister * - * Returns 0 on success, -EINTR if we can't grab the semaphore, - * -EEXIST if a lookup is performed and the entry already exists on - * the list, or a converted EFI status code if set_variable() fails. + * The caller must have already removed every entry from the list, + * failure to do so is an error. */ -int efivar_entry_set(struct efivar_entry *entry, u32 attributes, - unsigned long size, void *data, struct list_head *head) +int efivars_unregister(struct efivars *efivars) { - const struct efivar_operations *ops; - efi_status_t status; - efi_char16_t *name = entry->var.VariableName; - efi_guid_t vendor = entry->var.VendorGuid; + int rv; if (down_interruptible(&efivars_lock)) return -EINTR; if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - ops = __efivars->ops; - if (head && efivar_entry_find(name, vendor, head, false)) { - up(&efivars_lock); - return -EEXIST; - } - - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); - if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) - status = ops->set_variable(name, &vendor, - attributes, size, data); - - up(&efivars_lock); - - return efi_status_to_err(status); - -} -EXPORT_SYMBOL_GPL(efivar_entry_set); - -/* - * efivar_entry_set_nonblocking - call set_variable_nonblocking() - * - * This function is guaranteed to not block and is suitable for calling - * from crash/panic handlers. - * - * Crucially, this function will not block if it cannot acquire - * efivars_lock. 
Instead, it returns -EBUSY. - */ -static int -efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, - u32 attributes, unsigned long size, void *data) -{ - const struct efivar_operations *ops; - efi_status_t status; - - if (down_trylock(&efivars_lock)) - return -EBUSY; - - if (!__efivars) { - up(&efivars_lock); - return -EINVAL; - } - - status = check_var_size_nonblocking(attributes, - size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - up(&efivars_lock); - return -ENOSPC; - } - - ops = __efivars->ops; - status = ops->set_variable_nonblocking(name, &vendor, attributes, - size, data); - - up(&efivars_lock); - return efi_status_to_err(status); -} - -/** - * efivar_entry_set_safe - call set_variable() if enough space in firmware - * @name: buffer containing the variable name - * @vendor: variable vendor guid - * @attributes: variable attributes - * @block: can we block in this context? - * @size: size of @data buffer - * @data: buffer containing variable data - * - * Ensures there is enough free storage in the firmware for this variable, and - * if so, calls set_variable(). If creating a new EFI variable, this function - * is usually followed by efivar_entry_add(). - * - * Returns 0 on success, -ENOSPC if the firmware does not have enough - * space for set_variable() to succeed, or a converted EFI status code - * if set_variable() fails. - */ -int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, - bool block, unsigned long size, void *data) -{ - const struct efivar_operations *ops; - efi_status_t status; - - if (!__efivars) - return -EINVAL; - - ops = __efivars->ops; - if (!ops->query_variable_store) - return -ENOSYS; - - /* - * If the EFI variable backend provides a non-blocking - * ->set_variable() operation and we're in a context where we - * cannot block, then we need to use it to avoid live-locks, - * since the implication is that the regular ->set_variable() - * will block. - * - * If no ->set_variable_nonblocking() is provided then - * ->set_variable() is assumed to be non-blocking. - */ - if (!block && ops->set_variable_nonblocking) - return efivar_entry_set_nonblocking(name, vendor, attributes, - size, data); - - if (!block) { - if (down_trylock(&efivars_lock)) - return -EBUSY; - } else { - if (down_interruptible(&efivars_lock)) - return -EINTR; + printk(KERN_ERR "efivars not registered\n"); + rv = -EINVAL; + goto out; } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - up(&efivars_lock); - return -ENOSPC; + if (__efivars != efivars) { + rv = -EINVAL; + goto out; } - status = ops->set_variable(name, &vendor, attributes, size, data); + pr_info("Unregistered efivars operations\n"); + __efivars = NULL; + rv = 0; +out: up(&efivars_lock); - - return efi_status_to_err(status); + return rv; } -EXPORT_SYMBOL_GPL(efivar_entry_set_safe); +EXPORT_SYMBOL_GPL(efivars_unregister); -/** - * efivar_entry_find - search for an entry - * @name: the EFI variable name - * @guid: the EFI variable vendor's guid - * @head: head of the variable list - * @remove: should we remove the entry from the list? - * - * Search for an entry on the variable list that has the EFI variable - * name @name and vendor guid @guid. If an entry is found on the list - * and @remove is true, the entry is removed from the list. - * - * The caller MUST call efivar_entry_iter_begin() and - * efivar_entry_iter_end() before and after the invocation of this - * function, respectively. 
- * - * Returns the entry if found on the list, %NULL otherwise. - */ -struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, - struct list_head *head, bool remove) +int efivar_supports_writes(void) { - struct efivar_entry *entry, *n; - int strsize1, strsize2; - bool found = false; - - list_for_each_entry_safe(entry, n, head, list) { - strsize1 = ucs2_strsize(name, 1024); - strsize2 = ucs2_strsize(entry->var.VariableName, 1024); - if (strsize1 == strsize2 && - !memcmp(name, &(entry->var.VariableName), strsize1) && - !efi_guidcmp(guid, entry->var.VendorGuid)) { - found = true; - break; - } - } - - if (!found) - return NULL; - - if (remove) { - if (entry->scanning) { - /* - * The entry will be deleted - * after scanning is completed. - */ - entry->deleting = true; - } else - list_del(&entry->list); - } - - return entry; + return __efivars && __efivars->ops->set_variable; } -EXPORT_SYMBOL_GPL(efivar_entry_find); +EXPORT_SYMBOL_GPL(efivar_supports_writes); -/** - * efivar_entry_size - obtain the size of a variable - * @entry: entry for this variable - * @size: location to store the variable's size +/* + * efivar_lock() - obtain the efivar lock, wait for it if needed + * @return 0 on success, error code on failure */ -int efivar_entry_size(struct efivar_entry *entry, unsigned long *size) +int efivar_lock(void) { - const struct efivar_operations *ops; - efi_status_t status; - - *size = 0; - if (down_interruptible(&efivars_lock)) return -EINTR; - if (!__efivars) { + if (!__efivars->ops) { up(&efivars_lock); - return -EINVAL; + return -ENODEV; } - ops = __efivars->ops; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, NULL, size, NULL); - up(&efivars_lock); - - if (status != EFI_BUFFER_TOO_SMALL) - return efi_status_to_err(status); - return 0; } -EXPORT_SYMBOL_GPL(efivar_entry_size); - -/** - * __efivar_entry_get - call get_variable() - * @entry: read data for this variable - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer to store variable data - * - * The caller MUST call efivar_entry_iter_begin() and - * efivar_entry_iter_end() before and after the invocation of this - * function, respectively. 
- */ -int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data) -{ - efi_status_t status; - - if (!__efivars) - return -EINVAL; - - status = __efivars->ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(__efivar_entry_get); +EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR); -/** - * efivar_entry_get - call get_variable() - * @entry: read data for this variable - * @attributes: variable attributes - * @size: size of @data buffer - * @data: buffer to store variable data +/* + * efivar_trylock() - obtain the efivar lock if it is free + * @return 0 on success, error code on failure */ -int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data) +int efivar_trylock(void) { - efi_status_t status; - - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { + if (down_trylock(&efivars_lock)) + return -EBUSY; + if (!__efivars->ops) { up(&efivars_lock); - return -EINVAL; + return -ENODEV; } - - status = __efivars->ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - attributes, size, data); - up(&efivars_lock); - - return efi_status_to_err(status); -} -EXPORT_SYMBOL_GPL(efivar_entry_get); - -/** - * efivar_entry_set_get_size - call set_variable() and get new size (atomic) - * @entry: entry containing variable to set and get - * @attributes: attributes of variable to be written - * @size: size of data buffer - * @data: buffer containing data to write - * @set: did the set_variable() call succeed? - * - * This is a pretty special (complex) function. See efivarfs_file_write(). - * - * Atomically call set_variable() for @entry and if the call is - * successful, return the new size of the variable from get_variable() - * in @size. The success of set_variable() is indicated by @set. - * - * Returns 0 on success, -EINVAL if the variable data is invalid, - * -ENOSPC if the firmware does not have enough available space, or a - * converted EFI status code if either of set_variable() or - * get_variable() fail. - * - * If the EFI variable does not exist when calling set_variable() - * (EFI_NOT_FOUND), @entry is removed from the variable list. - */ -int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, - unsigned long *size, void *data, bool *set) -{ - const struct efivar_operations *ops; - efi_char16_t *name = entry->var.VariableName; - efi_guid_t *vendor = &entry->var.VendorGuid; - efi_status_t status; - int err; - - *set = false; - - if (efivar_validate(*vendor, name, data, *size) == false) - return -EINVAL; - - /* - * The lock here protects the get_variable call, the conditional - * set_variable call, and removal of the variable from the efivars - * list (in the case of an authenticated delete).
- */ - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - err = -EINVAL; - goto out; - } - - /* - * Ensure that the available space hasn't shrunk below the safe level - */ - status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - if (status != EFI_UNSUPPORTED) { - err = efi_status_to_err(status); - goto out; - } - - if (*size > 65536) { - err = -ENOSPC; - goto out; - } - } - - ops = __efivars->ops; - - status = ops->set_variable(name, vendor, attributes, *size, data); - if (status != EFI_SUCCESS) { - err = efi_status_to_err(status); - goto out; - } - - *set = true; - - /* - * Writing to the variable may have caused a change in size (which - * could either be an append or an overwrite), or the variable to be - * deleted. Perform a GetVariable() so we can tell what actually - * happened. - */ - *size = 0; - status = ops->get_variable(entry->var.VariableName, - &entry->var.VendorGuid, - NULL, size, NULL); - - if (status == EFI_NOT_FOUND) - efivar_entry_list_del_unlock(entry); - else - up(&efivars_lock); - - if (status && status != EFI_BUFFER_TOO_SMALL) - return efi_status_to_err(status); - return 0; - -out: - up(&efivars_lock); - return err; - -} -EXPORT_SYMBOL_GPL(efivar_entry_set_get_size); - -/** - * efivar_entry_iter_begin - begin iterating the variable list - * - * Lock the variable list to prevent entry insertion and removal until - * efivar_entry_iter_end() is called. This function is usually used in - * conjunction with __efivar_entry_iter() or efivar_entry_iter(). - */ -int efivar_entry_iter_begin(void) -{ - return down_interruptible(&efivars_lock); } -EXPORT_SYMBOL_GPL(efivar_entry_iter_begin); +EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR); -/** - * efivar_entry_iter_end - finish iterating the variable list - * - * Unlock the variable list and allow modifications to the list again. +/* + * efivar_unlock() - release the efivar lock */ -void efivar_entry_iter_end(void) +void efivar_unlock(void) { up(&efivars_lock); } -EXPORT_SYMBOL_GPL(efivar_entry_iter_end); +EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR); -/** - * __efivar_entry_iter - iterate over variable list - * @func: callback function - * @head: head of the variable list - * @data: function-specific data to pass to callback - * @prev: entry to begin iterating from - * - * Iterate over the list of EFI variables and call @func with every - * entry on the list. It is safe for @func to remove entries in the - * list via efivar_entry_delete(). - * - * You MUST call efivar_entry_iter_begin() before this function, and - * efivar_entry_iter_end() afterwards. - * - * It is possible to begin iteration from an arbitrary entry within - * the list by passing @prev. @prev is updated on return to point to - * the last entry passed to @func. To begin iterating from the - * beginning of the list @prev must be %NULL. - * - * The restrictions for @func are the same as documented for - * efivar_entry_iter(). 
- */ -int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data, - struct efivar_entry **prev) -{ - struct efivar_entry *entry, *n; - int err = 0; - - if (!prev || !*prev) { - list_for_each_entry_safe(entry, n, head, list) { - err = func(entry, data); - if (err) - break; - } - - if (prev) - *prev = entry; - - return err; - } - - - list_for_each_entry_safe_continue((*prev), n, head, list) { - err = func(*prev, data); - if (err) - break; - } - - return err; -} -EXPORT_SYMBOL_GPL(__efivar_entry_iter); - -/** - * efivar_entry_iter - iterate over variable list - * @func: callback function - * @head: head of variable list - * @data: function-specific data to pass to callback - * - * Iterate over the list of EFI variables and call @func with every - * entry on the list. It is safe for @func to remove entries in the - * list via efivar_entry_delete() while iterating. +/* + * efivar_get_variable() - retrieve a variable identified by name/vendor * - * Some notes for the callback function: - * - a non-zero return value indicates an error and terminates the loop - * - @func is called from atomic context + * Must be called with efivars_lock held. */ -int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data) +efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 *attr, unsigned long *size, void *data) { - int err = 0; - - err = efivar_entry_iter_begin(); - if (err) - return err; - err = __efivar_entry_iter(func, head, data, NULL); - efivar_entry_iter_end(); - - return err; + return __efivars->ops->get_variable(name, vendor, attr, size, data); } -EXPORT_SYMBOL_GPL(efivar_entry_iter); +EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR); -/** - * efivars_kobject - get the kobject for the registered efivars +/* + * efivar_get_next_variable() - enumerate the next name/vendor pair * - * If efivars_register() has not been called we return NULL, - * otherwise return the kobject used at registration time. + * Must be called with efivars_lock held. */ -struct kobject *efivars_kobject(void) +efi_status_t efivar_get_next_variable(unsigned long *name_size, + efi_char16_t *name, efi_guid_t *vendor) { - if (!__efivars) - return NULL; - - return __efivars->kobject; + return __efivars->ops->get_next_variable(name_size, name, vendor); } -EXPORT_SYMBOL_GPL(efivars_kobject); +EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR); -/** - * efivars_register - register an efivars - * @efivars: efivars to register - * @ops: efivars operations - * @kobject: @efivars-specific kobject +/* + * efivar_set_variable_locked() - set a variable identified by name/vendor * - * Only a single efivars can be registered at any time. + * Must be called with efivars_lock held. If @nonblocking is set, it will use + * non-blocking primitives so it is guaranteed not to sleep. 
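To show the consumer side of the new interface: these helpers are exported in the EFIVAR symbol namespace, so a module must import that namespace before linking against them. A minimal sketch; example_read_var() and its arguments are hypothetical:

#include <linux/efi.h>
#include <linux/module.h>

MODULE_IMPORT_NS(EFIVAR);

/* Hypothetical wrapper: read one variable under the efivars lock. */
static efi_status_t example_read_var(efi_char16_t *name, efi_guid_t *vendor,
				     void *buf, unsigned long *size)
{
	efi_status_t status;

	if (efivar_lock())
		return EFI_ABORTED;

	status = efivar_get_variable(name, vendor, NULL, size, buf);
	efivar_unlock();

	return status;
}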
*/ -int efivars_register(struct efivars *efivars, - const struct efivar_operations *ops, - struct kobject *kobject) +efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data, bool nonblocking) { - if (down_interruptible(&efivars_lock)) - return -EINTR; - - efivars->ops = ops; - efivars->kobject = kobject; - - __efivars = efivars; + efi_set_variable_t *setvar; + efi_status_t status; - pr_info("Registered efivars operations\n"); + if (data_size > 0) { + status = check_var_size(nonblocking, attr, + data_size + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) + return status; + } - up(&efivars_lock); + /* + * If no _nonblocking variant exists, the ordinary one + * is assumed to be non-blocking. + */ + setvar = __efivars->ops->set_variable_nonblocking; + if (!setvar || !nonblocking) + setvar = __efivars->ops->set_variable; - return 0; + return setvar(name, vendor, attr, data_size, data); } -EXPORT_SYMBOL_GPL(efivars_register); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR); -/** - * efivars_unregister - unregister an efivars - * @efivars: efivars to unregister +/* + * efivar_set_variable() - set a variable identified by name/vendor * - * The caller must have already removed every entry from the list, - * failure to do so is an error. + * Can be called without holding the efivars_lock. Will sleep on obtaining the + * lock, or on obtaining other locks that are needed in order to complete the + * call. */ -int efivars_unregister(struct efivars *efivars) +efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data) { - int rv; - - if (down_interruptible(&efivars_lock)) - return -EINTR; - - if (!__efivars) { - printk(KERN_ERR "efivars not registered\n"); - rv = -EINVAL; - goto out; - } - - if (__efivars != efivars) { - rv = -EINVAL; - goto out; - } - - pr_info("Unregistered efivars operations\n"); - __efivars = NULL; + efi_status_t status; - rv = 0; -out: - up(&efivars_lock); - return rv; -} -EXPORT_SYMBOL_GPL(efivars_unregister); + if (efivar_lock()) + return EFI_ABORTED; -int efivar_supports_writes(void) -{ - return __efivars && __efivars->ops->set_variable; + status = efivar_set_variable_locked(name, vendor, attr, data_size, + data, false); + efivar_unlock(); + return status; } -EXPORT_SYMBOL_GPL(efivar_supports_writes); +EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR); diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 97968aece54f..983e07dc022e 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE bool "Google Firmware Drivers" default n help - These firmware drivers are used by Google's servers. They are - only useful if you are working directly on one of their - proprietary servers. If in doubt, say "N". + These firmware drivers are used by Google servers, + Chromebooks and other devices using coreboot firmware. + If in doubt, say "N". if GOOGLE_FIRMWARE @@ -21,7 +21,7 @@ config GOOGLE_SMI config GOOGLE_COREBOOT_TABLE tristate "Coreboot Table Access" - depends on ACPI || OF + depends on HAS_IOMEM && (ACPI || OF) help This option enables the coreboot_table module, which provides other firmware modules access to the coreboot table. 
The coreboot table diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index adaa492c3d2d..4e2575dfeb90 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -681,6 +681,15 @@ static struct notifier_block gsmi_die_notifier = { static int gsmi_panic_callback(struct notifier_block *nb, unsigned long reason, void *arg) { + + /* + * Panic callbacks are executed with all other CPUs stopped, + * so we must not attempt to spin waiting for gsmi_dev.lock + * to be released. + */ + if (spin_is_locked(&gsmi_dev.lock)) + return NOTIFY_DONE; + gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); return NOTIFY_DONE; } diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c index a12db6ff323b..d492b99e1c6c 100644 --- a/drivers/firmware/imx/rm.c +++ b/drivers/firmware/imx/rm.c @@ -43,3 +43,48 @@ bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) return hdr->func; } EXPORT_SYMBOL(imx_sc_rm_is_resource_owned); + +struct imx_sc_msg_rm_get_resource_owner { + struct imx_sc_rpc_msg hdr; + union { + struct { + u16 resource; + } req; + struct { + u8 val; + } resp; + } data; +} __packed __aligned(4); + +/* + * This function gets the partition number of @resource + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * @param[out] pt pointer to return the partition number + * + * @return Returns 0 for success and < 0 for errors. + */ +int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt) +{ + struct imx_sc_msg_rm_get_resource_owner msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_RM; + hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER; + hdr->size = 2; + + msg.data.req.resource = resource; + + ret = imx_scu_call_rpc(ipc, &msg, true); + if (ret) + return ret; + + if (pt) + *pt = msg.data.resp.val; + + return 0; +} +EXPORT_SYMBOL(imx_sc_rm_get_resource_owner); diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index ff6569c4a53b..af3d057e6421 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -155,6 +155,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 }, { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 }, { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 }, + { "vpu-enc1", IMX_SC_R_VPU_ENC_1, 1, false, 0 }, + { "vpu-mu0", IMX_SC_R_VPU_MU_0, 1, false, 0 }, + { "vpu-mu1", IMX_SC_R_VPU_MU_1, 1, false, 0 }, + { "vpu-mu2", IMX_SC_R_VPU_MU_2, 1, false, 0 }, /* GPU SS */ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 }, diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 24945e2da77b..8e59be3782cb 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -69,6 +69,7 @@ static struct attribute *def_attrs[] = { &memmap_type_attr.attr, NULL }; +ATTRIBUTE_GROUPS(def); static const struct sysfs_ops memmap_attr_ops = { .show = memmap_attr_show, @@ -118,7 +119,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj) static struct kobj_type __refdata memmap_ktype = { .release = release_firmware_map_entry, .sysfs_ops = &memmap_attr_ops, - .default_attrs = def_attrs, + .default_groups = def_groups, }; /* diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c new file mode 100644 index 000000000000..3c071f814455 --- /dev/null +++ b/drivers/firmware/mtk-adsp-ipc.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022
MediaTek Corporation. All rights reserved. + * Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com> + */ + +#include <linux/firmware/mediatek/mtk-adsp-ipc.h> +#include <linux/kernel.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" }; + +/* + * mtk_adsp_ipc_send - send ipc cmd to MTK ADSP + * + * @ipc: ADSP IPC handle + * @idx: index of the mailbox channel + * @msg: IPC cmd (reply or request) + * + * Returns zero on success, as reported by mbox_send_message, or a + * negative value on error + */ +int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t msg) +{ + struct mtk_adsp_chan *adsp_chan; + int ret; + + if (idx >= MTK_ADSP_MBOX_NUM) + return -EINVAL; + + adsp_chan = &ipc->chans[idx]; + ret = mbox_send_message(adsp_chan->ch, &msg); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_adsp_ipc_send); + +/* + * mtk_adsp_ipc_recv - recv callback used by MTK ADSP mailbox + * + * @c: mbox client + * @msg: message received + * + * Users of ADSP IPC will need to provide handle_reply and handle_request + * callbacks. + */ +static void mtk_adsp_ipc_recv(struct mbox_client *c, void *msg) +{ + struct mtk_adsp_chan *chan = container_of(c, struct mtk_adsp_chan, cl); + struct device *dev = c->dev; + + switch (chan->idx) { + case MTK_ADSP_MBOX_REPLY: + chan->ipc->ops->handle_reply(chan->ipc); + break; + case MTK_ADSP_MBOX_REQUEST: + chan->ipc->ops->handle_request(chan->ipc); + break; + default: + dev_err(dev, "wrong mbox chan %d\n", chan->idx); + break; + } +} + +static int mtk_adsp_ipc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_adsp_ipc *adsp_ipc; + struct mtk_adsp_chan *adsp_chan; + struct mbox_client *cl; + int ret; + int i, j; + + device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent); + + adsp_ipc = devm_kzalloc(dev, sizeof(*adsp_ipc), GFP_KERNEL); + if (!adsp_ipc) + return -ENOMEM; + + for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) { + adsp_chan = &adsp_ipc->chans[i]; + cl = &adsp_chan->cl; + cl->dev = dev->parent; + cl->tx_block = false; + cl->knows_txdone = false; + cl->tx_prepare = NULL; + cl->rx_callback = mtk_adsp_ipc_recv; + + adsp_chan->ipc = adsp_ipc; + adsp_chan->idx = i; + adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]); + if (IS_ERR(adsp_chan->ch)) { + ret = PTR_ERR(adsp_chan->ch); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to request mbox chan %s ret %d\n", + adsp_mbox_ch_names[i], ret); + + for (j = 0; j < i; j++) { + adsp_chan = &adsp_ipc->chans[j]; + mbox_free_channel(adsp_chan->ch); + } + + return ret; + } + } + + adsp_ipc->dev = dev; + dev_set_drvdata(dev, adsp_ipc); + dev_dbg(dev, "MTK ADSP IPC initialized\n"); + + return 0; +} + +static int mtk_adsp_ipc_remove(struct platform_device *pdev) +{ + struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev); + struct mtk_adsp_chan *adsp_chan; + int i; + + for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) { + adsp_chan = &adsp_ipc->chans[i]; + mbox_free_channel(adsp_chan->ch); + } + + return 0; +} + +static struct platform_driver mtk_adsp_ipc_driver = { + .driver = { + .name = "mtk-adsp-ipc", + }, + .probe = mtk_adsp_ipc_probe, + .remove = mtk_adsp_ipc_remove, +}; +builtin_platform_driver(mtk_adsp_ipc_driver); + +MODULE_AUTHOR("Allen-KH Cheng <allen-kh.cheng@mediatek.com>"); +MODULE_DESCRIPTION("MTK ADSP IPC Driver"); +MODULE_LICENSE("GPL");
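The client contract of this new driver is small: a consumer supplies handle_reply()/handle_request() callbacks through the ops pointer, as the comment on mtk_adsp_ipc_recv() requires, and signals the DSP with mtk_adsp_ipc_send(). A minimal consumer sketch follows; it is not part of the patch, the mtk_adsp_ipc_ops layout is inferred from the ops->handle_* calls above, the opcode value is illustrative only, and the choice of MTK_ADSP_MBOX_REQUEST as the outbound request channel is an assumption:

/* Hypothetical consumer of the mtk-adsp-ipc driver above */
static void example_handle_reply(struct mtk_adsp_ipc *ipc)
{
	/* the DSP answered an earlier request; wake the waiter here */
}

static void example_handle_request(struct mtk_adsp_ipc *ipc)
{
	/* the DSP raised a request; fetch it from shared memory here */
}

/* layout inferred from the ops->handle_* calls in mtk_adsp_ipc_recv() */
static const struct mtk_adsp_ipc_ops example_ops = {
	.handle_reply = example_handle_reply,
	.handle_request = example_handle_request,
};

static int example_kick_dsp(struct device *ipc_dev)
{
	/* ipc_dev is the mtk-adsp-ipc platform device; probe() stored the handle there */
	struct mtk_adsp_ipc *ipc = dev_get_drvdata(ipc_dev);
	u32 op = 0x1;	/* doorbell opcode; value chosen for illustration */

	ipc->ops = &example_ops;

	/* assumption: host-initiated requests go out on the request channel */
	return mtk_adsp_ipc_send(ipc, MTK_ADSP_MBOX_REQUEST, op);
}

diff --git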
a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index cfb448eabdaa..e7bcfca4159f 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -9,6 +9,7 @@ #include <linux/acpi.h> #include <linux/arm-smccc.h> #include <linux/cpuidle.h> +#include <linux/debugfs.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/of.h> @@ -163,6 +164,8 @@ int psci_set_osi_mode(bool enable) PSCI_1_0_SUSPEND_MODE_PC; err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0); + if (err < 0) + pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err); return psci_to_linux_errno(err); } @@ -274,7 +277,7 @@ static void set_conduit(enum arm_smccc_conduit conduit) psci_conduit = conduit; } -static int get_set_conduit_method(struct device_node *np) +static int get_set_conduit_method(const struct device_node *np) { const char *method; @@ -324,17 +327,130 @@ static void psci_sys_poweroff(void) invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } -static int __init psci_features(u32 psci_func_id) +static int psci_features(u32 psci_func_id) { return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, psci_func_id, 0, 0); } +#ifdef CONFIG_DEBUG_FS + +#define PSCI_ID(ver, _name) \ + { .fn = PSCI_##ver##_FN_##_name, .name = #_name, } +#define PSCI_ID_NATIVE(ver, _name) \ + { .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, } + +/* A table of all optional functions */ +static const struct { + u32 fn; + const char *name; +} psci_fn_ids[] = { + PSCI_ID_NATIVE(0_2, MIGRATE), + PSCI_ID(0_2, MIGRATE_INFO_TYPE), + PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU), + PSCI_ID(1_0, CPU_FREEZE), + PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND), + PSCI_ID_NATIVE(1_0, NODE_HW_STATE), + PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND), + PSCI_ID(1_0, SET_SUSPEND_MODE), + PSCI_ID_NATIVE(1_0, STAT_RESIDENCY), + PSCI_ID_NATIVE(1_0, STAT_COUNT), + PSCI_ID_NATIVE(1_1, SYSTEM_RESET2), + PSCI_ID(1_1, MEM_PROTECT), + PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE), +}; + +static int psci_debugfs_read(struct seq_file *s, void *data) +{ + int feature, type, i; + u32 ver; + + ver = psci_ops.get_version(); + seq_printf(s, "PSCIv%d.%d\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + /* PSCI_FEATURES is available only starting from 1.0 */ + if (PSCI_VERSION_MAJOR(ver) < 1) + return 0; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + if (feature != PSCI_RET_NOT_SUPPORTED) { + ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + seq_printf(s, "SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + } else { + seq_puts(s, "SMC Calling Convention v1.0 is assumed\n"); + } + + feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND)); + if (feature < 0) { + seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature); + } else { + seq_printf(s, "OSI is %ssupported\n", + (feature & BIT(0)) ? "" : "not "); + seq_printf(s, "%s StateID format is used\n", + (feature & BIT(1)) ? "Extended" : "Original"); + } + + type = psci_ops.migrate_info_type(); + if (type == PSCI_0_2_TOS_UP_MIGRATE || + type == PSCI_0_2_TOS_UP_NO_MIGRATE) { + unsigned long cpuid; + + seq_printf(s, "Trusted OS %smigrate capable\n", + type == PSCI_0_2_TOS_UP_NO_MIGRATE ? 
"not " : ""); + cpuid = psci_migrate_info_up_cpu(); + seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n", + cpuid, resident_cpu); + } else if (type == PSCI_0_2_TOS_MP) { + seq_puts(s, "Trusted OS migration not required\n"); + } else { + if (type != PSCI_RET_NOT_SUPPORTED) + seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); + } + + for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) { + feature = psci_features(psci_fn_ids[i].fn); + if (feature == PSCI_RET_NOT_SUPPORTED) + continue; + if (feature < 0) + seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n", + psci_fn_ids[i].name, feature); + else + seq_printf(s, "%s is supported\n", psci_fn_ids[i].name); + } + + return 0; +} + +static int psci_debugfs_open(struct inode *inode, struct file *f) +{ + return single_open(f, psci_debugfs_read, NULL); +} + +static const struct file_operations psci_debugfs_ops = { + .owner = THIS_MODULE, + .open = psci_debugfs_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static int __init psci_debugfs_init(void) +{ + return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL, + &psci_debugfs_ops)); +} +late_initcall(psci_debugfs_init) +#endif + #ifdef CONFIG_CPU_IDLE static int psci_suspend_finisher(unsigned long state) { u32 power_state = state; - phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); + phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); return psci_ops.cpu_suspend(power_state, pa_cpu_resume); } @@ -359,7 +475,7 @@ int psci_cpu_suspend_enter(u32 state) static int psci_system_suspend(unsigned long unused) { - phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); + phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), pa_cpu_resume, 0, 0); @@ -528,7 +644,7 @@ typedef int (*psci_initcall_t)(const struct device_node *); * * Probe based on PSCI PSCI_VERSION function */ -static int __init psci_0_2_init(struct device_node *np) +static int __init psci_0_2_init(const struct device_node *np) { int err; @@ -549,7 +665,7 @@ static int __init psci_0_2_init(struct device_node *np) /* * PSCI < v0.2 get PSCI Function IDs via DT. */ -static int __init psci_0_1_init(struct device_node *np) +static int __init psci_0_1_init(const struct device_node *np) { u32 id; int err; @@ -585,7 +701,7 @@ static int __init psci_0_1_init(struct device_node *np) return 0; } -static int __init psci_1_0_init(struct device_node *np) +static int __init psci_1_0_init(const struct device_node *np) { int err; diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c index 1829ba220576..9f918b9e6f8f 100644 --- a/drivers/firmware/qcom_scm-legacy.c +++ b/drivers/firmware/qcom_scm-legacy.c @@ -120,6 +120,9 @@ static void __scm_legacy_do(const struct arm_smccc_args *smc, /** * scm_legacy_call() - Sends a command to the SCM and waits for the command to * finish processing. 
+ * @dev: device + * @desc: descriptor structure containing arguments and return values + * @res: results from SMC call * * A note on cache maintenance: * Note that any buffers that are expected to be accessed by the secure world @@ -211,6 +214,7 @@ out: /** * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments * and 3 return values + * @unused: device, legacy argument, not used, can be NULL * @desc: SCM call descriptor containing arguments * @res: SCM call return values * diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 7db8066b19fd..cdbfe54c8146 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -7,6 +7,7 @@ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dma-mapping.h> +#include <linux/interconnect.h> #include <linux/module.h> #include <linux/types.h> #include <linux/qcom_scm.h> @@ -31,8 +32,13 @@ struct qcom_scm { struct clk *core_clk; struct clk *iface_clk; struct clk *bus_clk; + struct icc_path *path; struct reset_controller_dev reset; + /* control access to the interconnect path */ + struct mutex scm_bw_lock; + int scm_vote_count; + u64 dload_mode_addr; }; @@ -49,26 +55,12 @@ struct qcom_scm_mem_map_info { __le64 mem_size; }; -#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 -#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 -#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 - -#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 -#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 -#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 - -struct qcom_scm_wb_entry { - int flag; - void *entry; +/* Each bit configures cold/warm boot address for one of the 4 CPUs */ +static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + 0, BIT(0), BIT(3), BIT(5) }; - -static struct qcom_scm_wb_entry qcom_scm_wb[] = { - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, +static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = { + BIT(2), BIT(1), BIT(4), BIT(6) }; static const char * const qcom_scm_convention_names[] = { @@ -113,6 +105,42 @@ static void qcom_scm_clk_disable(void) clk_disable_unprepare(__scm->bus_clk); } +static int qcom_scm_bw_enable(void) +{ + int ret = 0; + + if (!__scm->path) + return 0; + + if (IS_ERR(__scm->path)) + return -EINVAL; + + mutex_lock(&__scm->scm_bw_lock); + if (!__scm->scm_vote_count) { + ret = icc_set_bw(__scm->path, 0, UINT_MAX); + if (ret < 0) { + dev_err(__scm->dev, "failed to set bandwidth request\n"); + goto err_bw; + } + } + __scm->scm_vote_count++; +err_bw: + mutex_unlock(&__scm->scm_bw_lock); + + return ret; +} + +static void qcom_scm_bw_disable(void) +{ + if (IS_ERR_OR_NULL(__scm->path)) + return; + + mutex_lock(&__scm->scm_bw_lock); + if (__scm->scm_vote_count-- == 1) + icc_set_bw(__scm->path, 0, 0); + mutex_unlock(&__scm->scm_bw_lock); +} + enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN; static DEFINE_SPINLOCK(scm_query_lock); @@ -179,9 +207,8 @@ found: /** * qcom_scm_call() - Invoke a syscall in the secure world * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call * * Sends a command to the SCM and waits for the command to finish processing. * This should *only* be called in pre-emptible context. 
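For readers following the qcom_scm changes in this patch: every exported wrapper in this driver is built the same way, by filling a struct qcom_scm_desc and handing it to qcom_scm_call() (or qcom_scm_call_atomic() from non-sleeping contexts). A minimal sketch of that shape, using hypothetical QCOM_SCM_SVC_EXAMPLE/QCOM_SCM_EXAMPLE_CMD identifiers that are not defined anywhere in this patch:

static int qcom_scm_example(u32 arg0, u32 arg1)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_EXAMPLE,	/* hypothetical service id */
		.cmd = QCOM_SCM_EXAMPLE_CMD,	/* hypothetical command id */
		.arginfo = QCOM_SCM_ARGS(2),	/* two value arguments follow */
		.args[0] = arg0,
		.args[1] = arg1,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	/* may sleep; only call from preemptible context, per the kernel-doc above */
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	/* on success, the secure world's first return word is the result */
	return ret ? : res.result[0];
}

The wrappers added later in this patch, such as qcom_scm_iommu_set_cp_pool_size() and qcom_scm_iommu_set_pt_format(), follow exactly this pattern, varying only the service/command ids and the argument packing.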
@@ -205,8 +232,6 @@ static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, /** * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier * @desc: Descriptor structure containing arguments and return values * @res: Structure containing results from SMC/HVC call * @@ -260,97 +285,83 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, return ret ? false : !!res.result[0]; } -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits) { - int ret; - int flags = 0; int cpu; + unsigned int flags = 0; struct qcom_scm_desc desc = { .svc = QCOM_SCM_SVC_BOOT, .cmd = QCOM_SCM_BOOT_SET_ADDR, .arginfo = QCOM_SCM_ARGS(2), + .owner = ARM_SMCCC_OWNER_SIP, }; - /* - * Reassign only if we are switching from hotplug entry point - * to cpuidle entry point or vice versa. - */ - for_each_cpu(cpu, cpus) { - if (entry == qcom_scm_wb[cpu].entry) - continue; - flags |= qcom_scm_wb[cpu].flag; + for_each_present_cpu(cpu) { + if (cpu >= QCOM_SCM_BOOT_MAX_CPUS) + return -EINVAL; + flags |= cpu_bits[cpu]; } - /* No change in entry function */ - if (!flags) - return 0; - desc.args[0] = flags; desc.args[1] = virt_to_phys(entry); - ret = qcom_scm_call(__scm->dev, &desc, NULL); - if (!ret) { - for_each_cpu(cpu, cpus) - qcom_scm_wb[cpu].entry = entry; - } - - return ret; + return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. 
- */ -int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags) { - int flags = 0; - int cpu; - int scm_cb_flags[] = { - QCOM_SCM_FLAG_COLDBOOT_CPU0, - QCOM_SCM_FLAG_COLDBOOT_CPU1, - QCOM_SCM_FLAG_COLDBOOT_CPU2, - QCOM_SCM_FLAG_COLDBOOT_CPU3, - }; struct qcom_scm_desc desc = { .svc = QCOM_SCM_SVC_BOOT, - .cmd = QCOM_SCM_BOOT_SET_ADDR, - .arginfo = QCOM_SCM_ARGS(2), + .cmd = QCOM_SCM_BOOT_SET_ADDR_MC, .owner = ARM_SMCCC_OWNER_SIP, + .arginfo = QCOM_SCM_ARGS(6), + .args = { + virt_to_phys(entry), + /* Apply to all CPUs in all affinity levels */ + ~0ULL, ~0ULL, ~0ULL, ~0ULL, + flags, + }, }; - if (!cpus || cpumask_empty(cpus)) - return -EINVAL; + /* Need a device for DMA of the additional arguments */ + if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY) + return -EOPNOTSUPP; - for_each_cpu(cpu, cpus) { - if (cpu < ARRAY_SIZE(scm_cb_flags)) - flags |= scm_cb_flags[cpu]; - else - set_cpu_present(cpu, false); - } + return qcom_scm_call(__scm->dev, &desc, NULL); +} - desc.args[0] = flags; - desc.args[1] = virt_to_phys(entry); +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus + * @entry: Entry point function for the cpus + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. + */ +int qcom_scm_set_warm_boot_addr(void *entry) +{ + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); + return 0; +} +EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); - return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); +/** + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus + * @entry: Entry point function for the cpus + */ +int qcom_scm_set_cold_boot_addr(void *entry) +{ + if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT)) + /* Fallback to old SCM call */ + return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); + return 0; } EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); /** * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache + * @flags: Flags to flush cache * * This is an end point to power down cpu. If there was a pending interrupt, * the control would return from this function, otherwise, the cpu jumps to the @@ -435,10 +446,16 @@ static void qcom_scm_set_download_mode(bool enable) * and optional blob of data used for authenticating the metadata * and the rest of the firmware * @size: size of the metadata + * @ctx: optional metadata context * - * Returns 0 on success. + * Return: 0 on success. + * + * Upon successful return, the PAS metadata context (@ctx) will be used to + * track the metadata allocation, this needs to be released by invoking + * qcom_scm_pas_metadata_release() by the caller. 
*/ -int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx) { dma_addr_t mdata_phys; void *mdata_buf; @@ -467,22 +484,50 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) ret = qcom_scm_clk_enable(); if (ret) - goto free_metadata; + goto out; + + ret = qcom_scm_bw_enable(); + if (ret) + return ret; desc.args[1] = mdata_phys; ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); -free_metadata: - dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); +out: + if (ret < 0 || !ctx) { + dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); + } else if (ctx) { + ctx->ptr = mdata_buf; + ctx->phys = mdata_phys; + ctx->size = size; + } return ret ? : res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_init_image); /** + * qcom_scm_pas_metadata_release() - release metadata context + * @ctx: metadata context + */ +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) +{ + if (!ctx->ptr) + return; + + dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys); + + ctx->ptr = NULL; + ctx->phys = 0; + ctx->size = 0; +} +EXPORT_SYMBOL(qcom_scm_pas_metadata_release); + +/** * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral * for firmware loading * @peripheral: peripheral id @@ -509,7 +554,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -539,7 +589,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -568,8 +623,13 @@ int qcom_scm_pas_shutdown(u32 peripheral) if (ret) return ret; + ret = qcom_scm_bw_enable(); + if (ret) + return ret; + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_bw_disable(); qcom_scm_clk_disable(); return ret ? 
: res.result[0]; @@ -749,12 +809,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ @@ -765,6 +819,21 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) } EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = size, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size); + int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size) @@ -1131,6 +1200,22 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) } EXPORT_SYMBOL(qcom_scm_hdcp_req); +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_SMMU_PROGRAM, + .cmd = QCOM_SCM_SMMU_PT_FORMAT, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = sec_id, + .args[1] = ctx_num, + .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */ + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format); + int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { struct qcom_scm_desc desc = { @@ -1254,8 +1339,15 @@ static int qcom_scm_probe(struct platform_device *pdev) if (ret < 0) return ret; + mutex_init(&scm->scm_bw_lock); + clks = (unsigned long)of_device_get_match_data(&pdev->dev); + scm->path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(scm->path)) + return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), + "failed to acquire interconnect path\n"); + scm->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) { if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) @@ -1314,7 +1406,7 @@ static int qcom_scm_probe(struct platform_device *pdev) /* * If requested enable "download mode", from this point on warmboot - * will cause the the boot stages to enter download mode, unless + * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. 
*/ if (download_mode) @@ -1356,6 +1448,10 @@ static const struct of_device_id qcom_scm_dt_match[] = { SCM_HAS_IFACE_CLK | SCM_HAS_BUS_CLK) }, + { .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK | + SCM_HAS_IFACE_CLK | + SCM_HAS_BUS_CLK) + }, { .compatible = "qcom,scm-msm8994" }, { .compatible = "qcom,scm-msm8996" }, { .compatible = "qcom,scm" }, diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index d92156ceb3ac..db3d08a01209 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -78,8 +78,13 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_BOOT_SET_ADDR 0x01 #define QCOM_SCM_BOOT_TERMINATE_PC 0x02 #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10 +#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11 #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a #define QCOM_SCM_FLUSH_FLAG_MASK 0x3 +#define QCOM_SCM_BOOT_MAX_CPUS 4 +#define QCOM_SCM_BOOT_MC_FLAG_AARCH64 BIT(0) +#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT BIT(1) +#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT BIT(2) #define QCOM_SCM_SVC_PIL 0x02 #define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01 @@ -100,6 +105,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03 #define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04 +#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05 #define QCOM_SCM_MP_VIDEO_VAR 0x08 #define QCOM_SCM_MP_ASSIGN 0x16 @@ -119,11 +125,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_LMH_LIMIT_DCVSH 0x10 #define QCOM_SCM_SVC_SMMU_PROGRAM 0x15 +#define QCOM_SCM_SMMU_PT_FORMAT 0x01 #define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03 #define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02 -extern void __qcom_scm_init(void); - /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 #define QCOM_SCM_ENOMEM -5 diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 172c751a4f6c..a69399a6b7c0 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -388,14 +388,13 @@ static void fw_cfg_sysfs_cache_cleanup(void) struct fw_cfg_sysfs_entry *entry, *next; list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { - /* will end up invoking fw_cfg_sysfs_cache_delist() - * via each object's release() method (i.e. 
destructor) - */ + fw_cfg_sysfs_cache_delist(entry); + kobject_del(&entry->kobj); kobject_put(&entry->kobj); } } -/* default_attrs: per-entry attributes and show methods */ +/* per-entry attributes and show methods */ #define FW_CFG_SYSFS_ATTR(_attr) \ struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \ @@ -428,6 +427,7 @@ static struct attribute *fw_cfg_sysfs_entry_attrs[] = { &fw_cfg_sysfs_attr_name.attr, NULL, }; +ATTRIBUTE_GROUPS(fw_cfg_sysfs_entry); /* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */ static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a, @@ -448,13 +448,12 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) { struct fw_cfg_sysfs_entry *entry = to_entry(kobj); - fw_cfg_sysfs_cache_delist(entry); kfree(entry); } /* kobj_type: ties together all properties required to register an entry */ static struct kobj_type fw_cfg_sysfs_entry_ktype = { - .default_attrs = fw_cfg_sysfs_entry_attrs, + .default_groups = fw_cfg_sysfs_entry_groups, .sysfs_ops = &fw_cfg_sysfs_attr_ops, .release = fw_cfg_sysfs_release_entry, }; @@ -601,20 +600,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* set file entry information */ entry->size = be32_to_cpu(f->size); entry->select = be16_to_cpu(f->select); - memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); + strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) { - kobject_put(&entry->kobj); - return err; - } + if (err) + goto err_put_entry; /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); if (err) - goto err_add_raw; + goto err_del_entry; /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); @@ -623,9 +620,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) fw_cfg_sysfs_cache_enlist(entry); return 0; -err_add_raw: +err_del_entry: kobject_del(&entry->kobj); - kfree(entry); +err_put_entry: + kobject_put(&entry->kobj); return err; } diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c index 51201600d789..800673910b51 100644 --- a/drivers/firmware/scpi_pm_domain.c +++ b/drivers/firmware/scpi_pm_domain.c @@ -16,7 +16,6 @@ struct scpi_pm_domain { struct generic_pm_domain genpd; struct scpi_ops *ops; u32 domain; - char name[30]; }; /* @@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev) scpi_pd->domain = i; scpi_pd->ops = scpi_ops; - sprintf(scpi_pd->name, "%pOFn.%d", np, i); - scpi_pd->genpd.name = scpi_pd->name; + scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL, + "%pOFn.%d", np, i); + if (!scpi_pd->genpd.name) { + dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n", + np, i); + continue; + } scpi_pd->genpd.power_off = scpi_pd_power_off; scpi_pd->genpd.power_on = scpi_pd_power_on; diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c index 2d3e866decaa..89a68e7eeaa6 100644 --- a/drivers/firmware/smccc/kvm_guest.c +++ b/drivers/firmware/smccc/kvm_guest.c @@ -4,6 +4,7 @@ #include <linux/arm-smccc.h> #include <linux/bitmap.h> +#include <linux/cache.h> #include <linux/kernel.h> #include <linux/string.h> diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index 9378075d04e9..e51c95f8d445 100644 --- 
a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -24,12 +24,16 @@ #define RSU_DCMF1_MASK GENMASK_ULL(63, 32) #define RSU_DCMF2_MASK GENMASK_ULL(31, 0) #define RSU_DCMF3_MASK GENMASK_ULL(63, 32) +#define RSU_DCMF0_STATUS_MASK GENMASK_ULL(15, 0) +#define RSU_DCMF1_STATUS_MASK GENMASK_ULL(31, 16) +#define RSU_DCMF2_STATUS_MASK GENMASK_ULL(47, 32) +#define RSU_DCMF3_STATUS_MASK GENMASK_ULL(63, 48) #define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS)) #define INVALID_RETRY_COUNTER 0xFF #define INVALID_DCMF_VERSION 0xFF - +#define INVALID_DCMF_STATUS 0xFFFFFFFF typedef void (*rsu_callback)(struct stratix10_svc_client *client, struct stratix10_svc_cb_data *data); @@ -49,6 +53,10 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client, * @dcmf_version.dcmf1: Quartus dcmf1 version * @dcmf_version.dcmf2: Quartus dcmf2 version * @dcmf_version.dcmf3: Quartus dcmf3 version + * @dcmf_status.dcmf0: dcmf0 status + * @dcmf_status.dcmf1: dcmf1 status + * @dcmf_status.dcmf2: dcmf2 status + * @dcmf_status.dcmf3: dcmf3 status * @retry_counter: the current image's retry counter * @max_retry: the preset max retry value */ @@ -73,6 +81,13 @@ struct stratix10_rsu_priv { unsigned int dcmf3; } dcmf_version; + struct { + unsigned int dcmf0; + unsigned int dcmf1; + unsigned int dcmf2; + unsigned int dcmf3; + } dcmf_status; + unsigned int retry_counter; unsigned int max_retry; }; @@ -129,7 +144,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client, struct stratix10_rsu_priv *priv = client->priv; if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "FW doesn't support notify\n"); + dev_warn(client->dev, "Secure FW doesn't support notify\n"); else if (data->status == BIT(SVC_STATUS_ERROR)) dev_err(client->dev, "Failure, returned status is %lu\n", BIT(data->status)); @@ -156,7 +171,7 @@ static void rsu_retry_callback(struct stratix10_svc_client *client, if (data->status == BIT(SVC_STATUS_OK)) priv->retry_counter = *counter; else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "FW doesn't support retry\n"); + dev_warn(client->dev, "Secure FW doesn't support retry\n"); else dev_err(client->dev, "Failed to get retry counter %lu\n", BIT(data->status)); @@ -181,7 +196,7 @@ static void rsu_max_retry_callback(struct stratix10_svc_client *client, if (data->status == BIT(SVC_STATUS_OK)) priv->max_retry = *max_retry; else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "FW doesn't support max retry\n"); + dev_warn(client->dev, "Secure FW doesn't support max retry\n"); else dev_err(client->dev, "Failed to get max retry %lu\n", BIT(data->status)); @@ -216,6 +231,35 @@ static void rsu_dcmf_version_callback(struct stratix10_svc_client *client, } /** + * rsu_dcmf_status_callback() - Callback from Intel service layer for getting + * the DCMF status + * @client: pointer to client + * @data: pointer to callback data structure + * + * Callback from Intel service layer for DCMF status + */ +static void rsu_dcmf_status_callback(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_rsu_priv *priv = client->priv; + unsigned long long *value = (unsigned long long *)data->kaddr1; + + if (data->status == BIT(SVC_STATUS_OK)) { + priv->dcmf_status.dcmf0 = FIELD_GET(RSU_DCMF0_STATUS_MASK, + *value); + priv->dcmf_status.dcmf1 = FIELD_GET(RSU_DCMF1_STATUS_MASK, + *value); + priv->dcmf_status.dcmf2 = FIELD_GET(RSU_DCMF2_STATUS_MASK, + *value); + priv->dcmf_status.dcmf3 = 
FIELD_GET(RSU_DCMF3_STATUS_MASK, + *value); + } else + dev_err(client->dev, "failed to get DCMF status\n"); + + complete(&priv->completion); +} + +/** * rsu_send_msg() - send a message to Intel service layer * @priv: pointer to rsu private data * @command: RSU status or update command @@ -361,7 +405,8 @@ static ssize_t max_retry_show(struct device *dev, if (!priv) return -ENODEV; - return sprintf(buf, "0x%08x\n", priv->max_retry); + return scnprintf(buf, sizeof(priv->max_retry), + "0x%08x\n", priv->max_retry); } static ssize_t dcmf0_show(struct device *dev, @@ -408,6 +453,61 @@ static ssize_t dcmf3_show(struct device *dev, return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3); } +static ssize_t dcmf0_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->dcmf_status.dcmf0 == INVALID_DCMF_STATUS) + return -EIO; + + return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf0); +} + +static ssize_t dcmf1_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->dcmf_status.dcmf1 == INVALID_DCMF_STATUS) + return -EIO; + + return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf1); +} + +static ssize_t dcmf2_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->dcmf_status.dcmf2 == INVALID_DCMF_STATUS) + return -EIO; + + return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf2); +} + +static ssize_t dcmf3_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stratix10_rsu_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + + if (priv->dcmf_status.dcmf3 == INVALID_DCMF_STATUS) + return -EIO; + + return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf3); +} static ssize_t reboot_image_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -484,6 +584,10 @@ static DEVICE_ATTR_RO(dcmf0); static DEVICE_ATTR_RO(dcmf1); static DEVICE_ATTR_RO(dcmf2); static DEVICE_ATTR_RO(dcmf3); +static DEVICE_ATTR_RO(dcmf0_status); +static DEVICE_ATTR_RO(dcmf1_status); +static DEVICE_ATTR_RO(dcmf2_status); +static DEVICE_ATTR_RO(dcmf3_status); static DEVICE_ATTR_WO(reboot_image); static DEVICE_ATTR_WO(notify); @@ -500,6 +604,10 @@ static struct attribute *rsu_attrs[] = { &dev_attr_dcmf1.attr, &dev_attr_dcmf2.attr, &dev_attr_dcmf3.attr, + &dev_attr_dcmf0_status.attr, + &dev_attr_dcmf1_status.attr, + &dev_attr_dcmf2_status.attr, + &dev_attr_dcmf3_status.attr, &dev_attr_reboot_image.attr, &dev_attr_notify.attr, NULL @@ -532,6 +640,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev) priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION; priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION; priv->max_retry = INVALID_RETRY_COUNTER; + priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS; + priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS; + priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS; + priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS; mutex_init(&priv->lock); priv->chan = stratix10_svc_request_channel_byname(&priv->client, @@ -561,6 +673,13 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } + ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_STATUS, + 0, rsu_dcmf_status_callback); + if (ret) { + dev_err(dev, "Error, getting 
DCMF status %i\n", ret); + stratix10_svc_free_channel(priv->chan); + } + ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); if (ret) { dev_err(dev, "Error, getting RSU retry %i\n", ret); diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 29c0a616b317..b4081f4d88a3 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -34,12 +34,13 @@ * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. */ #define SVC_NUM_DATA_IN_FIFO 32 -#define SVC_NUM_CHANNEL 2 +#define SVC_NUM_CHANNEL 3 #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 /* stratix10 service layer clients */ #define STRATIX10_RSU "stratix10-rsu" +#define INTEL_FCS "intel-fcs" typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, @@ -53,6 +54,7 @@ struct stratix10_svc_chan; */ struct stratix10_svc { struct platform_device *stratix10_svc_rsu; + struct platform_device *intel_svc_fcs; }; /** @@ -97,8 +99,10 @@ struct stratix10_svc_data_mem { /** * struct stratix10_svc_data - service data structure * @chan: service channel - * @paddr: playload physical address - * @size: playload size + * @paddr: physical address of the payload to be processed + * @size: size of the payload to be processed + * @paddr_output: physical address of the processed payload + * @size_output: size of the processed payload * @command: service command requested by client * @flag: configuration type (full or partial) * @arg: args to be passed via registers and not physically mapped buffers @@ -109,6 +113,8 @@ struct stratix10_svc_data { struct stratix10_svc_chan *chan; phys_addr_t paddr; size_t size; + phys_addr_t paddr_output; + size_t size_output; u32 command; u32 flag; u64 arg[3]; @@ -246,6 +252,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl, { struct arm_smccc_res res; int count_in_sec; + unsigned long a0, a1, a2; cb_data->kaddr1 = NULL; cb_data->kaddr2 = NULL; @@ -254,24 +261,45 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl, pr_debug("%s: polling config status\n", __func__); + a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE; + a1 = (unsigned long)p_data->paddr; + a2 = (unsigned long)p_data->size; + + if (p_data->command == COMMAND_POLL_SERVICE_STATUS) + a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; + count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC; while (count_in_sec) { - ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE, - 0, 0, 0, 0, 0, 0, 0, &res); + ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res); if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) || - (res.a0 == INTEL_SIP_SMC_STATUS_ERROR)) + (res.a0 == INTEL_SIP_SMC_STATUS_ERROR) || + (res.a0 == INTEL_SIP_SMC_STATUS_REJECTED)) break; /* - * configuration is still in progress, wait one second then + * request is still in progress, wait one second then * poll again */ msleep(1000); count_in_sec--; } - if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec) + if (!count_in_sec) { + pr_err("%s: poll status timeout\n", __func__); + cb_data->status = BIT(SVC_STATUS_BUSY); + } else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) { cb_data->status = BIT(SVC_STATUS_COMPLETED); + cb_data->kaddr2 = (res.a2) ? + svc_pa_to_va(res.a2) : NULL; + cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL; + } else { + pr_err("%s: poll status error\n", __func__); + cb_data->kaddr1 = &res.a1; + cb_data->kaddr2 = (res.a2) ? + svc_pa_to_va(res.a2) : NULL; + cb_data->kaddr3 = (res.a3) ?
&res.a3 : NULL; + cb_data->status = BIT(SVC_STATUS_ERROR); + } p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data); } @@ -296,6 +324,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, case COMMAND_RECONFIG: case COMMAND_RSU_UPDATE: case COMMAND_RSU_NOTIFY: + case COMMAND_FCS_REQUEST_SERVICE: + case COMMAND_FCS_SEND_CERTIFICATE: + case COMMAND_FCS_DATA_ENCRYPTION: + case COMMAND_FCS_DATA_DECRYPTION: cb_data->status = BIT(SVC_STATUS_OK); break; case COMMAND_RECONFIG_DATA_SUBMIT: @@ -306,14 +338,29 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, break; case COMMAND_RSU_RETRY: case COMMAND_RSU_MAX_RETRY: + case COMMAND_RSU_DCMF_STATUS: + case COMMAND_FIRMWARE_VERSION: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr1 = &res.a1; + break; + case COMMAND_SMC_SVC_VERSION: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; + cb_data->kaddr2 = &res.a2; break; case COMMAND_RSU_DCMF_VERSION: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; cb_data->kaddr2 = &res.a2; break; + case COMMAND_FCS_RANDOM_NUMBER_GEN: + case COMMAND_FCS_GET_PROVISION_DATA: + case COMMAND_POLL_SERVICE_STATUS: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr1 = &res.a1; + cb_data->kaddr2 = svc_pa_to_va(res.a2); + cb_data->kaddr3 = &res.a3; + break; default: pr_warn("it shouldn't happen\n"); break; @@ -340,7 +387,7 @@ static int svc_normal_to_secure_thread(void *data) struct stratix10_svc_data *pdata; struct stratix10_svc_cb_data *cbdata; struct arm_smccc_res res; - unsigned long a0, a1, a2; + unsigned long a0, a1, a2, a3, a4, a5, a6, a7; int ret_fifo = 0; pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); @@ -357,6 +404,11 @@ static int svc_normal_to_secure_thread(void *data) a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK; a1 = 0; a2 = 0; + a3 = 0; + a4 = 0; + a5 = 0; + a6 = 0; + a7 = 0; pr_debug("smc_hvc_shm_thread is running\n"); @@ -422,15 +474,79 @@ static int svc_normal_to_secure_thread(void *data) a1 = 0; a2 = 0; break; + case COMMAND_FIRMWARE_VERSION: + a0 = INTEL_SIP_SMC_FIRMWARE_VERSION; + a1 = 0; + a2 = 0; + break; + + /* for FCS */ + case COMMAND_FCS_DATA_ENCRYPTION: + a0 = INTEL_SIP_SMC_FCS_CRYPTION; + a1 = 1; + a2 = (unsigned long)pdata->paddr; + a3 = (unsigned long)pdata->size; + a4 = (unsigned long)pdata->paddr_output; + a5 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_DATA_DECRYPTION: + a0 = INTEL_SIP_SMC_FCS_CRYPTION; + a1 = 0; + a2 = (unsigned long)pdata->paddr; + a3 = (unsigned long)pdata->size; + a4 = (unsigned long)pdata->paddr_output; + a5 = (unsigned long)pdata->size_output; + break; + case COMMAND_FCS_RANDOM_NUMBER_GEN: + a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER; + a1 = (unsigned long)pdata->paddr; + a2 = 0; + break; + case COMMAND_FCS_REQUEST_SERVICE: + a0 = INTEL_SIP_SMC_FCS_SERVICE_REQUEST; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + break; + case COMMAND_FCS_SEND_CERTIFICATE: + a0 = INTEL_SIP_SMC_FCS_SEND_CERTIFICATE; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + break; + case COMMAND_FCS_GET_PROVISION_DATA: + a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA; + a1 = (unsigned long)pdata->paddr; + a2 = 0; + break; + + /* for polling */ + case COMMAND_POLL_SERVICE_STATUS: + a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; + a1 = (unsigned long)pdata->paddr; + a2 = (unsigned long)pdata->size; + break; + case COMMAND_RSU_DCMF_STATUS: + a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS; + a1 = 0; + a2 = 0; + break; + case COMMAND_SMC_SVC_VERSION: + a0 = 
INTEL_SIP_SMC_SVC_VERSION; + a1 = 0; + a2 = 0; + break; default: pr_warn("it shouldn't happen\n"); break; } pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", - __func__, (unsigned int)a0, (unsigned int)a1); + __func__, + (unsigned int)a0, + (unsigned int)a1); pr_debug(" a2=0x%016x\n", (unsigned int)a2); - - ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res); + pr_debug(" a3=0x%016x\n", (unsigned int)a3); + pr_debug(" a4=0x%016x\n", (unsigned int)a4); + pr_debug(" a5=0x%016x\n", (unsigned int)a5); + ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); pr_debug("%s: after SMC call -- res.a0=0x%016x", __func__, (unsigned int)res.a0); @@ -462,6 +578,7 @@ static int svc_normal_to_secure_thread(void *data) pdata, cbdata); break; case COMMAND_RECONFIG_STATUS: + case COMMAND_POLL_SERVICE_STATUS: svc_thread_cmd_config_status(ctrl, pdata, cbdata); break; @@ -472,14 +589,31 @@ static int svc_normal_to_secure_thread(void *data) break; case INTEL_SIP_SMC_STATUS_REJECTED: pr_debug("%s: STATUS_REJECTED\n", __func__); + /* for FCS */ + switch (pdata->command) { + case COMMAND_FCS_REQUEST_SERVICE: + case COMMAND_FCS_SEND_CERTIFICATE: + case COMMAND_FCS_GET_PROVISION_DATA: + case COMMAND_FCS_DATA_ENCRYPTION: + case COMMAND_FCS_DATA_DECRYPTION: + case COMMAND_FCS_RANDOM_NUMBER_GEN: + cbdata->status = BIT(SVC_STATUS_INVALID_PARAM); + cbdata->kaddr1 = NULL; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + pdata->chan->scl->receive_cb(pdata->chan->scl, + cbdata); + break; + } break; case INTEL_SIP_SMC_STATUS_ERROR: case INTEL_SIP_SMC_RSU_ERROR: pr_err("%s: STATUS_ERROR\n", __func__); cbdata->status = BIT(SVC_STATUS_ERROR); - cbdata->kaddr1 = NULL; - cbdata->kaddr2 = NULL; - cbdata->kaddr3 = NULL; + cbdata->kaddr1 = &res.a1; + cbdata->kaddr2 = (res.a2) ? + svc_pa_to_va(res.a2) : NULL; + cbdata->kaddr3 = (res.a3) ? 
&res.a3 : NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); break; default: @@ -487,11 +621,10 @@ static int svc_normal_to_secure_thread(void *data) /* * be compatible with older version firmware which - * doesn't support RSU notify or retry + * doesn't support newer RSU commands */ - if ((pdata->command == COMMAND_RSU_RETRY) || - (pdata->command == COMMAND_RSU_MAX_RETRY) || - (pdata->command == COMMAND_RSU_NOTIFY)) { + if ((pdata->command != COMMAND_RSU_UPDATE) && + (pdata->command != COMMAND_RSU_STATUS)) { cbdata->status = BIT(SVC_STATUS_NO_SUPPORT); cbdata->kaddr1 = NULL; @@ -845,8 +978,19 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) list_for_each_entry(p_mem, &svc_data_mem, node) if (p_mem->vaddr == p_msg->payload) { p_data->paddr = p_mem->paddr; + p_data->size = p_msg->payload_length; break; } + if (p_msg->payload_output) { + list_for_each_entry(p_mem, &svc_data_mem, node) + if (p_mem->vaddr == p_msg->payload_output) { + p_data->paddr_output = + p_mem->paddr; + p_data->size_output = + p_msg->payload_length_output; + break; + } + } } p_data->command = p_msg->command; @@ -941,17 +1085,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; - size_t size = 0; list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { - size = pmem->size; - break; + gen_pool_free(chan->ctrl->genpool, + (unsigned long)kaddr, pmem->size); + pmem->vaddr = NULL; + list_del(&pmem->node); + return; } - gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size); - pmem->vaddr = NULL; - list_del(&pmem->node); + list_del(&svc_data_mem); } EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); @@ -1029,6 +1173,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) chans[1].name = SVC_CLIENT_RSU; spin_lock_init(&chans[1].lock); + chans[2].scl = NULL; + chans[2].ctrl = controller; + chans[2].name = SVC_CLIENT_FCS; + spin_lock_init(&chans[2].lock); + list_add_tail(&controller->node, &svc_ctrl); platform_set_drvdata(pdev, controller); @@ -1047,8 +1196,22 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) } ret = platform_device_add(svc->stratix10_svc_rsu); - if (ret) - goto err_put_device; + if (ret) { + platform_device_put(svc->stratix10_svc_rsu); + return ret; + } + + svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1); + if (!svc->intel_svc_fcs) { + dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); + return -ENOMEM; + } + + ret = platform_device_add(svc->intel_svc_fcs); + if (ret) { + platform_device_put(svc->intel_svc_fcs); + return ret; + } dev_set_drvdata(dev, svc); @@ -1056,8 +1219,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) return 0; -err_put_device: - platform_device_put(svc->stratix10_svc_rsu); err_free_kfifo: kfifo_free(&controller->svc_fifo); return ret; @@ -1068,6 +1229,7 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev) struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + platform_device_unregister(svc->intel_svc_fcs); platform_device_unregister(svc->stratix10_svc_rsu); kfifo_free(&ctrl->svc_fifo); diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 2bfbb05f7d89..3fd3563d962b 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -34,21 +34,59 @@ #include <linux/screen_info.h> #include <linux/sysfb.h> +static struct platform_device *pd; +static 
DEFINE_MUTEX(disable_lock); +static bool disabled; + +static bool sysfb_unregister(void) +{ + if (IS_ERR_OR_NULL(pd)) + return false; + + platform_device_unregister(pd); + pd = NULL; + + return true; +} + +/** + * sysfb_disable() - disable the Generic System Framebuffers support + * + * This disables the registration of system framebuffer devices that match the + * generic drivers that make use of the system framebuffer set up by firmware. + * + * It also unregisters a device if this was already registered by sysfb_init(). + * + * Context: The function can sleep. A @disable_lock mutex is acquired to serialize + * against sysfb_init(), that registers a system framebuffer device. + */ +void sysfb_disable(void) +{ + mutex_lock(&disable_lock); + sysfb_unregister(); + disabled = true; + mutex_unlock(&disable_lock); +} +EXPORT_SYMBOL_GPL(sysfb_disable); + static __init int sysfb_init(void) { struct screen_info *si = &screen_info; struct simplefb_platform_data mode; - struct platform_device *pd; const char *name; bool compatible; - int ret; + int ret = 0; + + mutex_lock(&disable_lock); + if (disabled) + goto unlock_mutex; /* try to create a simple-framebuffer device */ compatible = sysfb_parse_mode(si, &mode); if (compatible) { - ret = sysfb_create_simplefb(si, &mode); - if (!ret) - return 0; + pd = sysfb_create_simplefb(si, &mode); + if (!IS_ERR(pd)) + goto unlock_mutex; } /* if the FB is incompatible, create a legacy framebuffer device */ @@ -56,12 +94,18 @@ static __init int sysfb_init(void) name = "efi-framebuffer"; else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) name = "vesa-framebuffer"; + else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC) + name = "vga-framebuffer"; + else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC) + name = "ega-framebuffer"; else name = "platform-framebuffer"; pd = platform_device_alloc(name, 0); - if (!pd) - return -ENOMEM; + if (!pd) { + ret = -ENOMEM; + goto unlock_mutex; + } sysfb_apply_efi_quirks(pd); @@ -73,9 +117,11 @@ static __init int sysfb_init(void) if (ret) goto err; - return 0; + goto unlock_mutex; err: platform_device_put(pd); +unlock_mutex: + mutex_unlock(&disable_lock); return ret; } diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index b86761904949..a353e27f83f5 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si, return false; } -__init int sysfb_create_simplefb(const struct screen_info *si, - const struct simplefb_platform_data *mode) +__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) { struct platform_device *pd; struct resource res; @@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si, base |= (u64)si->ext_lfb_base << 32; if (!base || (u64)(resource_size_t)base != base) { printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } /* @@ -93,32 +93,41 @@ __init int sysfb_create_simplefb(const struct screen_info *si, length = mode->height * mode->stride; if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } length = PAGE_ALIGN(length); /* setup IORESOURCE_MEM as framebuffer memory */ memset(&res, 0, sizeof(res)); - res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + res.flags = IORESOURCE_MEM; res.name = simplefb_resname; res.start = base; res.end = res.start + length - 1; if (res.end <= 
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index b86761904949..a353e27f83f5 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
 	return false;
 }
 
-__init int sysfb_create_simplefb(const struct screen_info *si,
-				 const struct simplefb_platform_data *mode)
+__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+						     const struct simplefb_platform_data *mode)
 {
 	struct platform_device *pd;
 	struct resource res;
@@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
 		base |= (u64)si->ext_lfb_base << 32;
 	if (!base || (u64)(resource_size_t)base != base) {
 		printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	/*
@@ -93,32 +93,41 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
 	length = mode->height * mode->stride;
 	if (length > size) {
 		printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 	length = PAGE_ALIGN(length);
 
 	/* setup IORESOURCE_MEM as framebuffer memory */
 	memset(&res, 0, sizeof(res));
-	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	res.flags = IORESOURCE_MEM;
 	res.name = simplefb_resname;
 	res.start = base;
 	res.end = res.start + length - 1;
 	if (res.end <= res.start)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	pd = platform_device_alloc("simple-framebuffer", 0);
 	if (!pd)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	sysfb_apply_efi_quirks(pd);
 
 	ret = platform_device_add_resources(pd, &res, 1);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	ret = platform_device_add_data(pd, mode, sizeof(*mode));
 	if (ret)
-		return ret;
+		goto err_put_device;
 
-	return platform_device_add(pd);
+	ret = platform_device_add(pd);
+	if (ret)
+		goto err_put_device;
+
+	return pd;
+
+err_put_device:
+	platform_device_put(pd);
+
+	return ERR_PTR(ret);
}
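sysfb_create_simplefb() now returns the device or an encoded errno instead of an int, so callers get both the handle and the failure reason from one value. A minimal sketch of that ERR_PTR convention, with a hypothetical struct widget and make_widget() standing in for the real function:

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

static struct widget *make_widget(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);

	w->id = id;
	return w;
}

static int use_widget(void)
{
	struct widget *w = make_widget(1);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* decode it back to an int */

	kfree(w);
	return 0;
}
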
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 6d66fe03fb6a..9d3874cdaaee 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
 	const char *root_path, *filename = NULL;
 	char *root_path_buf;
 	size_t root_len;
+	size_t root_path_buf_len = 512;
 
-	root_path_buf = kzalloc(512, GFP_KERNEL);
+	root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
 	if (!root_path_buf)
 		goto out;
 
 	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
-				sizeof(root_path_buf));
+				root_path_buf_len);
 	if (IS_ERR(root_path))
 		goto out;
@@ -376,18 +377,11 @@ static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
 	if (!filename)
 		return -ENOENT;
 
-	databuf = kmalloc(count, GFP_KERNEL);
-	if (!databuf)
-		return -ENOMEM;
-
-	if (copy_from_user(databuf, buf, count)) {
-		err = -EFAULT;
-		goto free_ret;
-	}
+	databuf = memdup_user(buf, count);
+	if (IS_ERR(databuf))
+		return PTR_ERR(databuf);
 
 	err = mrq_debug_write(bpmp, filename, databuf, count);
-
-free_ret:
 	kfree(databuf);
 
 	return err ?: count;
@@ -473,7 +467,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
 	mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
 	dentry = debugfs_create_file(name, mode, parent, bpmp,
 				     &bpmp_debug_fops);
-	if (!dentry) {
+	if (IS_ERR(dentry)) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -724,7 +718,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
 
 		if (t & DEBUGFS_S_ISDIR) {
 			dentry = debugfs_create_dir(name, parent);
-			if (!dentry)
+			if (IS_ERR(dentry))
 				return -ENOMEM;
 			err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
 			if (err < 0)
@@ -737,7 +731,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
 
 			dentry = debugfs_create_file(name, mode, parent,
 						     bpmp, &debugfs_fops);
-			if (!dentry)
+			if (IS_ERR(dentry))
 				return -ENOMEM;
 		}
 	}
@@ -787,11 +781,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
 		return 0;
 
 	root = debugfs_create_dir("bpmp", NULL);
-	if (!root)
+	if (IS_ERR(root))
 		return -ENOMEM;
 
 	bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
-	if (!bpmp->debugfs_mirror) {
+	if (IS_ERR(bpmp->debugfs_mirror)) {
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 5654c5e9862b..037db21de510 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
 	int err;
 
 	if (data && size > 0)
-		memcpy(data, channel->ib->data, size);
+		memcpy_fromio(data, channel->ib->data, size);
 
 	err = tegra_bpmp_ack_response(channel);
 	if (err < 0)
@@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
 	channel->ob->flags = flags;
 
 	if (data && size > 0)
-		memcpy(channel->ob->data, data, size);
+		memcpy_toio(channel->ob->data, data, size);
 
 	return tegra_bpmp_post_request(channel);
 }
@@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
 	channel->ob->code = code;
 
 	if (data && size > 0)
-		memcpy(channel->ob->data, data, size);
+		memcpy_toio(channel->ob->data, data, size);
 
 	err = tegra_bpmp_post_response(channel);
 	if (WARN_ON(err < 0))
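The bpmp_debug_store() hunk above leans on memdup_user(), which collapses the kmalloc() + copy_from_user() + error-unwind sequence into one call that returns either the filled buffer or an ERR_PTR. A minimal sketch, with a hypothetical my_write() shown for shape only:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t my_write(const char __user *buf, size_t count)
{
	char *databuf;

	databuf = memdup_user(buf, count);	/* allocate + copy in one step */
	if (IS_ERR(databuf))
		return PTR_ERR(databuf);	/* -ENOMEM or -EFAULT */

	/* ... consume databuf ... */

	kfree(databuf);
	return count;
}

Besides being shorter, this removes the free_ret label and the chance of returning with a half-initialized buffer still allocated.
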
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 235c7e7869aa..ebc32bbd9b83 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
 /*
  * Texas Instruments System Control Interface Protocol Driver
  *
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
  *	Nishanth Menon
  */
 
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/export.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/module.h>
@@ -96,6 +97,7 @@ struct ti_sci_desc {
  * @node:	list head
  * @host_id:	Host ID
  * @users:	Number of users of this instance
+ * @is_suspending: Flag set to indicate in suspend path.
  */
 struct ti_sci_info {
 	struct device *dev;
@@ -114,7 +116,7 @@ struct ti_sci_info {
 	u8 host_id;
 	/* protected by ti_sci_list_mutex */
 	int users;
-
+	bool is_suspending;
 };
 
 #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
@@ -349,6 +351,8 @@ static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 	xfer->tx_message.len = tx_message_size;
+	xfer->tx_message.chan_rx = info->chan_rx;
+	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
 	xfer->rx_len = (u8)rx_message_size;
 
 	reinit_completion(&xfer->done);
@@ -406,6 +410,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
 	int ret;
 	int timeout;
 	struct device *dev = info->dev;
+	bool done_state = true;
 
 	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
 	if (ret < 0)
@@ -413,13 +418,27 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
 
 	ret = 0;
 
-	/* And we wait for the response. */
-	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
-	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+	if (!info->is_suspending) {
+		/* And we wait for the response. */
+		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+		if (!wait_for_completion_timeout(&xfer->done, timeout))
+			ret = -ETIMEDOUT;
+	} else {
+		/*
+		 * If we are suspending, we cannot use wait_for_completion_timeout
+		 * during noirq phase, so we must manually poll the completion.
+		 */
+		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+					       true, 1,
+					       info->desc->max_rx_timeout_ms * 1000,
+					       false, &xfer->done);
+	}
+
+	if (ret == -ETIMEDOUT || !done_state) {
 		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
 			(void *)_RET_IP_);
-		ret = -ETIMEDOUT;
 	}
+
 	/*
 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
 	 * transfer queueing since the protocol layer queues things by itself.
@@ -1759,7 +1778,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
 		desc->num = resp->range_num;
 		desc->start_sec = resp->range_start_sec;
 		desc->num_sec = resp->range_num_sec;
-	};
+	}
 
 fail:
 	ti_sci_put_one_xfer(&info->minfo, xfer);
@@ -3264,6 +3283,35 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
 	return NOTIFY_BAD;
 }
 
+static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+{
+	info->is_suspending = is_suspending;
+}
+
+static int ti_sci_suspend(struct device *dev)
+{
+	struct ti_sci_info *info = dev_get_drvdata(dev);
+	/*
+	 * We must switch operation to polled mode now as drivers and the genpd
+	 * layer may make late TI SCI calls to change clock and device states
+	 * from the noirq phase of suspend.
+	 */
+	ti_sci_set_is_suspending(info, true);
+
+	return 0;
+}
+
+static int ti_sci_resume(struct device *dev)
+{
+	struct ti_sci_info *info = dev_get_drvdata(dev);
+
+	ti_sci_set_is_suspending(info, false);
+
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+
 /* Description for K2G */
 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
 	.default_host_id = 2,
@@ -3412,7 +3460,7 @@ static int ti_sci_probe(struct platform_device *pdev)
 		ret = register_restart_handler(&info->nb);
 		if (ret) {
 			dev_err(dev, "reboot registration fail(%d)\n", ret);
-			return ret;
+			goto out;
 		}
 	}
 
@@ -3472,6 +3520,7 @@ static struct platform_driver ti_sci_driver = {
 	.driver = {
 		   .name = "ti-sci",
 		   .of_match_table = of_match_ptr(ti_sci_of_match),
+		   .pm = &ti_sci_pm_ops,
 	},
 };
 module_platform_driver(ti_sci_driver);
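The ti_sci_do_xfer() change above swaps a sleeping wait for a busy-poll during the noirq suspend phase, when the mailbox interrupt may no longer fire. A minimal sketch of that polling idea, assuming the macro's condition argument should be the polled value itself so the loop keeps retrying until the completion is consumed or the timeout expires (wait_polled() and its parameters are hypothetical):

#include <linux/completion.h>
#include <linux/iopoll.h>

static int wait_polled(struct completion *done, unsigned int timeout_ms)
{
	bool completed = false;

	/*
	 * Calls try_wait_for_completion(done) every 1 us, without sleeping,
	 * until it returns true or timeout_ms elapses.
	 */
	return read_poll_timeout_atomic(try_wait_for_completion, completed,
					completed, 1, timeout_ms * 1000UL,
					false, done);	/* 0 or -ETIMEDOUT */
}

try_wait_for_completion() is safe in atomic context, which is exactly why it can stand in for wait_for_completion_timeout() here.
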
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 3dd45a7420dc..ff5cabe70a2b 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
 /*
  * Xilinx Zynq MPSoC Firmware layer
  *
- *  Copyright (C) 2014-2021 Xilinx, Inc.
+ *  Copyright (C) 2014-2022 Xilinx, Inc.
  *
  *  Michal Simek <michal.simek@xilinx.com>
  *  Davorin Mista <davorin.mista@aggios.com>
@@ -23,6 +23,7 @@
 #include <linux/hashtable.h>
 
 #include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
 #include "zynqmp-debug.h"
 
 /* Max HashMap Order for PM API feature check (1<<7 = 128) */
@@ -35,8 +36,28 @@
 /* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
 #define CRL_APB_BOOTPIN_CTRL_MASK	0xF0FU
 
+/* IOCTL/QUERY feature payload size */
+#define FEATURE_PAYLOAD_SIZE		2
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK		GENMASK(15, 0)
+
 static bool feature_check_enabled;
 static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
+static u32 query_features[FEATURE_PAYLOAD_SIZE];
+
+static struct platform_device *em_dev;
+
+/**
+ * struct zynqmp_devinfo - Structure for Zynqmp device instance
+ * @dev: Device Pointer
+ * @feature_conf_id: Feature conf id
+ */
+struct zynqmp_devinfo {
+	struct device *dev;
+	u32 feature_conf_id;
+};
 
 /**
  * struct pm_api_feature_data - PM API Feature data
@@ -154,21 +175,28 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
 	return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
 }
 
-/**
- * zynqmp_pm_feature() - Check weather given feature is supported or not
- * @api_id:		API ID to check
- *
- * Return: Returns status, either success or error+reason
- */
-static int zynqmp_pm_feature(u32 api_id)
+static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
 {
 	int ret;
-	u32 ret_payload[PAYLOAD_ARG_CNT];
 	u64 smc_arg[2];
-	struct pm_api_feature_data *feature_data;
 
-	if (!feature_check_enabled)
-		return 0;
+	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+	smc_arg[1] = api_id;
+
+	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+	if (ret)
+		ret = -EOPNOTSUPP;
+	else
+		ret = ret_payload[1];
+
+	return ret;
+}
+
+static int do_feature_check_call(const u32 api_id)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	struct pm_api_feature_data *feature_data;
 
 	/* Check for existing entry in hash table for given api */
 	hash_for_each_possible(pm_api_features_map, feature_data, hentry,
@@ -183,22 +211,86 @@ static int zynqmp_pm_feature(u32 api_id)
 		return -ENOMEM;
 
 	feature_data->pm_api_id = api_id;
-	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
-	smc_arg[1] = api_id;
-
-	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
-	if (ret)
-		ret = -EOPNOTSUPP;
-	else
-		ret = ret_payload[1];
+	ret = __do_feature_check_call(api_id, ret_payload);
 
 	feature_data->feature_status = ret;
 	hash_add(pm_api_features_map, &feature_data->hentry, api_id);
 
+	if (api_id == PM_IOCTL)
+		/* Store supported IOCTL IDs mask */
+		memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+	else if (api_id == PM_QUERY_DATA)
+		/* Store supported QUERY IDs mask */
+		memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not and
+ *			 store supported IOCTL/QUERY ID mask
+ * @api_id:		API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_feature(const u32 api_id)
+{
+	int ret;
+
+	if (!feature_check_enabled)
+		return 0;
+
+	ret = do_feature_check_call(api_id);
+
 	return ret;
 }
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
 
 /**
+ * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
+ *				       is supported or not
+ * @api_id:		PM_IOCTL or PM_QUERY_DATA
+ * @id:			IOCTL or QUERY function IDs
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+	int ret;
+	u32 *bit_mask;
+
+	/* Input arguments validation */
+	if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
+		return -EINVAL;
+
+	/* Check feature check API version */
+	ret = do_feature_check_call(PM_FEATURE_CHECK);
+	if (ret < 0)
+		return ret;
+
+	/* Check if feature check version 2 is supported or not */
+	if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
+		/*
+		 * Call feature check for IOCTL/QUERY API to get IOCTL ID or
+		 * QUERY ID feature status.
+		 */
+		ret = do_feature_check_call(api_id);
+		if (ret < 0)
+			return ret;
+
+		bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
+
+		if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
+			return -EOPNOTSUPP;
+	} else {
+		return -ENODATA;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *			   caller function depending on the configuration
  * @pm_api_id:		Requested PM-API call
@@ -248,6 +340,20 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
 static u32 pm_api_version;
 static u32 pm_tz_version;
 
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+	int ret;
+
+	ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
+				  NULL);
+	if (!ret)
+		return ret;
+
+	/* try old implementation as fallback strategy if above fails */
+	return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+				   reset, NULL);
+}
+
 /**
  * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
  * @version:	Returned version value
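The ID test in zynqmp_pm_is_function_supported() above reduces to one operation: the firmware reports supported function IDs as a 64-bit bitmap split across two u32 payload words, and ID n is supported when bit (n % 32) of word (n / 32) is set. A standalone sketch of that check (id_supported() is a hypothetical helper):

#include <linux/bits.h>
#include <linux/types.h>

static bool id_supported(const u32 *mask, u32 id)
{
	if (id >= 64)
		return false;	/* only two 32-bit words are reported */

	return mask[id / 32] & BIT(id % 32);
}

For example, id 35 lands in mask[1], bit 3, which is why FEATURE_PAYLOAD_SIZE of 2 covers IDs 0 through 63.
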
@@ -1117,6 +1223,55 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
 EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
 
 /**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address:	Address of the data/ Address of output buffer where
+ *		hash should be stored.
+ * @size:	Size of the data.
+ * @flags:
+ *	BIT(0) - for initializing csudma driver and SHA3(Here address
+ *		 and size inputs can be NULL).
+ *	BIT(1) - to call Sha3_Update API which can be called multiple
+ *		 times when data is not contiguous.
+ *	BIT(2) - to get final hash of the whole updated data.
+ *		 Hash will be overwritten at provided address with
+ *		 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
+{
+	u32 lower_addr = lower_32_bits(address);
+	u32 upper_addr = upper_32_bits(address);
+
+	return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+				   size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
+
+/**
+ * zynqmp_pm_register_notifier() - PM API for register a subsystem
+ *				   to be notified about specific
+ *				   event/error.
+ * @node:	Node ID to which the event is related.
+ * @event:	Event Mask of Error events for which wants to get notified.
+ * @wake:	Wake subsystem upon capturing the event if value 1
+ * @enable:	Enable the registration for value 1, disable for value 0
+ *
+ * This function is used to register/un-register for particular node-event
+ * combination in firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+				const u32 wake, const u32 enable)
+{
+	return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+				   wake, enable, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
+
+/**
  * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
  * @type:	Shutdown or restart? 0 for shutdown, 1 for restart
  * @subtype:	Specifies which system should be restarted or shut down
@@ -1130,6 +1285,64 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
 }
 
 /**
+ * zynqmp_pm_set_feature_config - PM call to request IOCTL for feature config
+ * @id:		The config ID of the feature to be configured
+ * @value:	The config value of the feature to be configured
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
+				   id, value, NULL);
+}
+
+/**
+ * zynqmp_pm_get_feature_config - PM call to get value of configured feature
+ * @id:		The config id of the feature to be queried
+ * @payload:	Returned value array
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+				 u32 *payload)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
+				   id, 0, payload);
+}
+
+/**
+ * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
+ * @node:	SD node ID
+ * @config:	The config type of SD registers
+ * @value:	Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
+				   config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
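One plausible call sequence for the zynqmp_pm_sha_hash() flags documented above (BIT(0) init, BIT(1) update, BIT(2) final). This is a sketch only: sha3_digest() is hypothetical, the buffers must live at firmware-accessible physical addresses, and the 48-byte output follows the SHA3-384 digest size stated in the kernel-doc:

#include <linux/bits.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int sha3_digest(u64 data_addr, u32 len, u64 out_addr)
{
	int ret;

	ret = zynqmp_pm_sha_hash(0, 0, BIT(0));	/* init the engine */
	if (ret)
		return ret;

	ret = zynqmp_pm_sha_hash(data_addr, len, BIT(1));	/* update */
	if (ret)
		return ret;

	/* final: firmware writes the 48-byte digest to out_addr */
	return zynqmp_pm_sha_hash(out_addr, 48, BIT(2));
}
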
+
+/**
+ * zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
+ * @node:	GEM node ID
+ * @config:	The config type of GEM registers
+ * @value:	Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+			     u32 value)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
+				   config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
+
+/**
  * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
  * @subtype:	Shutdown subtype
  * @name:	Matching string for scope argument
@@ -1397,6 +1610,78 @@ static DEVICE_ATTR_RW(pggs1);
 static DEVICE_ATTR_RW(pggs2);
 static DEVICE_ATTR_RW(pggs3);
 
+static ssize_t feature_config_id_show(struct device *device,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+	return sysfs_emit(buf, "%d\n", devinfo->feature_conf_id);
+}
+
+static ssize_t feature_config_id_store(struct device *device,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	u32 config_id;
+	int ret;
+	struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+	if (!buf)
+		return -EINVAL;
+
+	ret = kstrtou32(buf, 10, &config_id);
+	if (ret)
+		return ret;
+
+	devinfo->feature_conf_id = config_id;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_id);
+
+static ssize_t feature_config_value_show(struct device *device,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+	ret = zynqmp_pm_get_feature_config(devinfo->feature_conf_id,
+					   ret_payload);
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%d\n", ret_payload[1]);
+}
+
+static ssize_t feature_config_value_store(struct device *device,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	u32 value;
+	int ret;
+	struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+	if (!buf)
+		return -EINVAL;
+
+	ret = kstrtou32(buf, 10, &value);
+	if (ret)
+		return ret;
+
+	ret = zynqmp_pm_set_feature_config(devinfo->feature_conf_id,
+					   value);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_value);
+
 static struct attribute *zynqmp_firmware_attrs[] = {
 	&dev_attr_ggs0.attr,
 	&dev_attr_ggs1.attr,
@@ -1408,6 +1693,8 @@ static struct attribute *zynqmp_firmware_attrs[] = {
 	&dev_attr_pggs3.attr,
 	&dev_attr_shutdown_scope.attr,
 	&dev_attr_health_status.attr,
+	&dev_attr_feature_config_id.attr,
+	&dev_attr_feature_config_value.attr,
 	NULL,
 };
 
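The feature_config attributes above follow the usual sysfs shape: DEVICE_ATTR_RW(x) pairs x_show()/x_store(), and the store side parses the decimal text with kstrtou32() before touching driver state. A minimal sketch with a hypothetical "level" attribute, assuming drvdata points at a struct holding one u32:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct my_state { u32 level; };

static ssize_t level_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct my_state *st = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", st->level);
}

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct my_state *st = dev_get_drvdata(dev);
	u32 val;
	int ret;

	ret = kstrtou32(buf, 10, &val);	/* rejects non-numeric input */
	if (ret)
		return ret;

	st->level = val;
	return count;	/* whole write consumed */
}
static DEVICE_ATTR_RW(level);
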
@@ -1417,8 +1704,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np;
+	struct zynqmp_devinfo *devinfo;
 	int ret;
 
+	ret = get_set_conduit_method(dev->of_node);
+	if (ret)
+		return ret;
+
 	np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
 	if (!np) {
 		np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
@@ -1427,14 +1719,28 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
 		feature_check_enabled = true;
 	}
+
+	if (!feature_check_enabled) {
+		ret = do_feature_check_call(PM_FEATURE_CHECK);
+		if (ret >= 0)
+			feature_check_enabled = true;
+	}
+
 	of_node_put(np);
 
-	ret = get_set_conduit_method(dev->of_node);
+	devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
+	if (!devinfo)
+		return -ENOMEM;
+
+	devinfo->dev = dev;
+
+	platform_set_drvdata(pdev, devinfo);
+
+	/* Check PM API version number */
+	ret = zynqmp_pm_get_api_version(&pm_api_version);
 	if (ret)
 		return ret;
 
-	/* Check PM API version number */
-	zynqmp_pm_get_api_version(&pm_api_version);
 	if (pm_api_version < ZYNQMP_PM_VERSION) {
 		panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
 		      __func__,
@@ -1468,6 +1774,15 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
 	zynqmp_pm_api_debugfs_init();
 
+	np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+	if (np) {
+		em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
+						       -1, NULL, 0);
+		if (IS_ERR(em_dev))
+			dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register fail with error\n");
+	}
+	of_node_put(np);
+
 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
 }
 
@@ -1485,6 +1800,8 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
 		kfree(feature_data);
 	}
 
+	platform_device_unregister(em_dev);
+
 	return 0;
 }
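The event-manager registration above uses platform_device_register_data(), which allocates, names and adds the child in one call and hands back either the device or an ERR_PTR, so the parent only needs a single unregister on the way out. A minimal sketch with a hypothetical "my-child" name; note the unconditional unregister is tolerable because platform_device_del() and platform_device_put() ignore IS_ERR_OR_NULL() pointers:

#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_device *child;

static void spawn_child(struct device *parent)
{
	child = platform_device_register_data(parent, "my-child", -1,
					      NULL, 0);
	if (IS_ERR(child))
		dev_err_probe(parent, PTR_ERR(child),
			      "child registration failed\n");
}

static void reap_child(void)
{
	/* no-op if registration failed and child holds an ERR_PTR */
	platform_device_unregister(child);
}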