Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/Makefile | 4
-rw-r--r--  drivers/soc/apple/Kconfig | 24
-rw-r--r--  drivers/soc/apple/Makefile | 6
-rw-r--r--  drivers/soc/apple/rtkit-crashlog.c | 154
-rw-r--r--  drivers/soc/apple/rtkit-internal.h | 62
-rw-r--r--  drivers/soc/apple/rtkit.c | 958
-rw-r--r--  drivers/soc/apple/sart.c | 328
-rw-r--r--  drivers/soc/bcm/bcm63xx/bcm-pmb.c | 3
-rw-r--r--  drivers/soc/imx/Makefile | 1
-rw-r--r--  drivers/soc/imx/gpcv2.c | 430
-rw-r--r--  drivers/soc/imx/imx8m-blk-ctrl.c | 126
-rw-r--r--  drivers/soc/imx/imx8mp-blk-ctrl.c | 696
-rw-r--r--  drivers/soc/mediatek/mt8167-mmsys.h | 2
-rw-r--r--  drivers/soc/mediatek/mt8183-mmsys.h | 2
-rw-r--r--  drivers/soc/mediatek/mt8186-mmsys.h | 4
-rw-r--r--  drivers/soc/mediatek/mt8192-mmsys.h | 4
-rw-r--r--  drivers/soc/mediatek/mt8195-mmsys.h | 370
-rw-r--r--  drivers/soc/mediatek/mt8365-mmsys.h | 4
-rw-r--r--  drivers/soc/mediatek/mtk-cmdq-helper.c | 25
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.c | 153
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.h | 6
-rw-r--r--  drivers/soc/mediatek/mtk-mutex.c | 92
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 61
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 11
-rw-r--r--  drivers/soc/qcom/pdr_internal.h | 20
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 73
-rw-r--r--  drivers/soc/qcom/smem.c | 305
-rw-r--r--  drivers/soc/qcom/smp2p.c | 1
-rw-r--r--  drivers/soc/qcom/smsm.c | 1
-rw-r--r--  drivers/soc/qcom/socinfo.c | 26
-rw-r--r--  drivers/soc/renesas/Kconfig | 26
-rw-r--r--  drivers/soc/renesas/Makefile | 1
-rw-r--r--  drivers/soc/renesas/r8a779g0-sysc.c | 62
-rw-r--r--  drivers/soc/renesas/rcar-gen4-sysc.c | 3
-rw-r--r--  drivers/soc/renesas/rcar-gen4-sysc.h | 1
-rw-r--r--  drivers/soc/renesas/rcar-rst.c | 1
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 23
-rw-r--r--  drivers/soc/rockchip/Kconfig | 24
-rw-r--r--  drivers/soc/rockchip/grf.c | 17
-rw-r--r--  drivers/soc/rockchip/pm_domains.c | 128
-rw-r--r--  drivers/soc/tegra/Kconfig | 1
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 8
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra30.c | 17
-rw-r--r--  drivers/soc/tegra/pmc.c | 122
-rw-r--r--  drivers/soc/ti/knav_dma.c | 29
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c | 21
-rw-r--r--  drivers/soc/ti/omap_prm.c | 7
-rw-r--r--  drivers/soc/ti/pm33xx.c | 6
-rw-r--r--  drivers/soc/ti/pruss.c | 3
-rw-r--r--  drivers/soc/ti/ti_sci_pm_domains.c | 2
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c | 210
51 files changed, 4393 insertions, 271 deletions
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index fd7717d597fc..919716e0e700 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_ARCH_APPLE) += apple/
+obj-y += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
@@ -23,7 +23,7 @@ obj-y += pxa/
obj-y += amlogic/
obj-y += qcom/
obj-y += renesas/
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_SOC_SIFIVE) += sifive/
obj-y += sunxi/
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index 9b8de31d6a8f..a1596fefacff 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -17,6 +17,30 @@ config APPLE_PMGR_PWRSTATE
controls for SoC devices. This driver manages them through the
generic power domain framework, and also provides reset support.
+config APPLE_RTKIT
+ tristate "Apple RTKit co-processor IPC protocol"
+ depends on MAILBOX
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SoCs such as the M1 come with various co-processors running
+ their proprietary RTKit operating system. This option enables support
+ for the protocol library used to communicate with those. It is used
+ by various client drivers.
+
+ Say 'y' here if you have an Apple SoC.
+
+config APPLE_SART
+ tristate "Apple SART DMA address filter"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SART is a simple DMA address filter used on Apple SoCs such
+ as the M1. It is usually required for the NVMe coprocessor which does
+ not use a proper IOMMU.
+
+ Say 'y' here if you have an Apple SoC.
+
endmenu
endif
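
For context, a minimal config fragment that enables both of the new options introduced above (they also default to the value of ARCH_APPLE, so Apple platform configs pick them up automatically; APPLE_RTKIT additionally depends on MAILBOX):

CONFIG_APPLE_RTKIT=y
CONFIG_APPLE_SART=y
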
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index c114e84667e4..e293770cf66d 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,2 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
+
+obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
+apple-rtkit-y = rtkit.o rtkit-crashlog.o
+
+obj-$(CONFIG_APPLE_SART) += apple-sart.o
+apple-sart-y = sart.o
diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c
new file mode 100644
index 000000000000..732deed64660
--- /dev/null
+++ b/drivers/soc/apple/rtkit-crashlog.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+#include "rtkit-internal.h"
+
+#define FOURCC(a, b, c, d) \
+ (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E')
+#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r')
+#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
+#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
+#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
+
+struct apple_rtkit_crashlog_header {
+ u32 fourcc;
+ u32 version;
+ u32 size;
+ u32 flags;
+ u8 _unk[16];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20);
+
+struct apple_rtkit_crashlog_mbox_entry {
+ u64 msg0;
+ u64 msg1;
+ u32 timestamp;
+ u8 _unk[4];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
+
+static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 idx;
+ u8 *ptr, *end;
+
+ memcpy(&idx, bfr, 4);
+
+ ptr = bfr + 4;
+ end = bfr + size;
+ while (ptr < end) {
+ u8 *newline = memchr(ptr, '\n', end - ptr);
+
+ if (newline) {
+ u8 tmp = *newline;
+ *newline = '\0';
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx,
+ ptr);
+ *newline = tmp;
+ ptr = newline + 1;
+ } else {
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx,
+ ptr);
+ break;
+ }
+ }
+}
+
+static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16);
+}
+
+static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u64 crash_time;
+
+ memcpy(&crash_time, bfr, 8);
+ dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time);
+}
+
+static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 type, index, i;
+ size_t n_messages;
+ struct apple_rtkit_crashlog_mbox_entry entry;
+
+ memcpy(&type, bfr + 16, 4);
+ memcpy(&index, bfr + 24, 4);
+ n_messages = (size - 28) / sizeof(entry);
+
+ dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)",
+ type, index);
+ for (i = 0; i < n_messages; ++i) {
+ memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry));
+ dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i,
+ entry.timestamp, entry.msg0, entry.msg1);
+ }
+}
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
+{
+ size_t offset;
+ u32 section_fourcc, section_size;
+ struct apple_rtkit_crashlog_header header;
+
+ memcpy(&header, bfr, sizeof(header));
+ if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
+ dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
+ header.fourcc);
+ return;
+ }
+
+ if (header.size > size) {
+ dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
+ header.size);
+ return;
+ }
+
+ size = header.size;
+ offset = sizeof(header);
+
+ while (offset < size) {
+ memcpy(&section_fourcc, bfr + offset, 4);
+ memcpy(&section_size, bfr + offset + 12, 4);
+
+ switch (section_fourcc) {
+ case APPLE_RTKIT_CRASHLOG_HEADER:
+ dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
+ return;
+ case APPLE_RTKIT_CRASHLOG_STR:
+ apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_VERSION:
+ apple_rtkit_crashlog_dump_version(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_MBOX:
+ apple_rtkit_crashlog_dump_mailbox(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_TIME:
+ apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown crashlog section: %x",
+ section_fourcc);
+ }
+
+ offset += section_size;
+ }
+
+ dev_warn(rtk->dev,
+ "RTKit: End of crashlog reached but no footer present");
+}
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
new file mode 100644
index 000000000000..24bd619ec5e4
--- /dev/null
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_RTKIT_INTERNAL_H
+#define _APPLE_RTKIT_INTERNAL_H
+
+#include <linux/apple-mailbox.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/workqueue.h>
+
+#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
+#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
+
+struct apple_rtkit {
+ void *cookie;
+ const struct apple_rtkit_ops *ops;
+ struct device *dev;
+
+ const char *mbox_name;
+ int mbox_idx;
+ struct mbox_client mbox_cl;
+ struct mbox_chan *mbox_chan;
+
+ struct completion epmap_completion;
+ struct completion iop_pwr_ack_completion;
+ struct completion ap_pwr_ack_completion;
+
+ int boot_result;
+ int version;
+
+ unsigned int iop_power_state;
+ unsigned int ap_power_state;
+ bool crashed;
+
+ DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+
+ struct apple_rtkit_shmem ioreport_buffer;
+ struct apple_rtkit_shmem crashlog_buffer;
+
+ struct apple_rtkit_shmem syslog_buffer;
+ char *syslog_msg_buffer;
+ size_t syslog_n_entries;
+ size_t syslog_msg_size;
+
+ struct workqueue_struct *wq;
+};
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size);
+
+#endif
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
new file mode 100644
index 000000000000..cf1129e9f76b
--- /dev/null
+++ b/drivers/soc/apple/rtkit.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "rtkit-internal.h"
+
+enum {
+ APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
+ APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+ APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
+ APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+};
+
+enum {
+ APPLE_RTKIT_EP_MGMT = 0,
+ APPLE_RTKIT_EP_CRASHLOG = 1,
+ APPLE_RTKIT_EP_SYSLOG = 2,
+ APPLE_RTKIT_EP_DEBUG = 3,
+ APPLE_RTKIT_EP_IOREPORT = 4,
+ APPLE_RTKIT_EP_OSLOG = 8,
+};
+
+#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
+
+enum {
+ APPLE_RTKIT_MGMT_HELLO = 1,
+ APPLE_RTKIT_MGMT_HELLO_REPLY = 2,
+ APPLE_RTKIT_MGMT_STARTEP = 5,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7,
+ APPLE_RTKIT_MGMT_EPMAP = 8,
+ APPLE_RTKIT_MGMT_EPMAP_REPLY = 8,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb,
+};
+
+#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0)
+#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16)
+
+#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51)
+#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32)
+#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0)
+
+#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0)
+
+#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32)
+#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1)
+
+#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0)
+
+#define APPLE_RTKIT_CRASHLOG_CRASH 1
+
+#define APPLE_RTKIT_BUFFER_REQUEST 1
+#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
+#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0)
+
+#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
+
+#define APPLE_RTKIT_SYSLOG_LOG 5
+
+#define APPLE_RTKIT_SYSLOG_INIT 8
+#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0)
+#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
+
+#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
+#define APPLE_RTKIT_OSLOG_INIT 1
+#define APPLE_RTKIT_OSLOG_ACK 3
+
+#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
+#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
+
+struct apple_rtkit_msg {
+ struct completion *completion;
+ struct apple_mbox_msg mbox_msg;
+};
+
+struct apple_rtkit_rx_work {
+ struct apple_rtkit *rtk;
+ u8 ep;
+ u64 msg;
+ struct work_struct work;
+};
+
+bool apple_rtkit_is_running(struct apple_rtkit *rtk)
+{
+ if (rtk->crashed)
+ return false;
+ if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ return true;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_running);
+
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
+{
+ return rtk->crashed;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
+
+static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+ u64 msg)
+{
+ msg &= ~APPLE_RTKIT_MGMT_TYPE;
+ msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+}
+
+static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 reply;
+
+ int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg);
+ int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg);
+ int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver);
+
+ dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver);
+
+ if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n",
+ min_ver);
+ goto abort_boot;
+ }
+
+ if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n",
+ max_ver);
+ goto abort_boot;
+ }
+
+ dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n",
+ want_ver);
+ rtk->version = want_ver;
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver);
+ reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply);
+
+ return;
+
+abort_boot:
+ rtk->boot_result = -EINVAL;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
+{
+ int i, ep;
+ u64 reply;
+ unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg);
+ u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg);
+
+ dev_dbg(rtk->dev,
+ "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n",
+ bitmap, base);
+
+ for_each_set_bit(i, &bitmap, 32) {
+ ep = 32 * base + i;
+ dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep);
+ set_bit(ep, rtk->endpoints);
+ }
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base);
+ if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST)
+ reply |= APPLE_RTKIT_MGMT_EPMAP_LAST;
+ else
+ reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE;
+
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply);
+
+ if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST))
+ return;
+
+ for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) {
+ switch (ep) {
+ /* the management endpoint is started by default */
+ case APPLE_RTKIT_EP_MGMT:
+ break;
+
+ /* without starting these RTKit refuses to boot */
+ case APPLE_RTKIT_EP_SYSLOG:
+ case APPLE_RTKIT_EP_CRASHLOG:
+ case APPLE_RTKIT_EP_DEBUG:
+ case APPLE_RTKIT_EP_IOREPORT:
+ case APPLE_RTKIT_EP_OSLOG:
+ dev_dbg(rtk->dev,
+ "RTKit: Starting system endpoint 0x%02x\n", ep);
+ apple_rtkit_start_ep(rtk, ep);
+ break;
+
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown system endpoint: 0x%02x\n",
+ ep);
+ }
+ }
+
+ rtk->boot_result = 0;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n",
+ rtk->iop_power_state, new_state);
+ rtk->iop_power_state = new_state;
+
+ complete_all(&rtk->iop_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n",
+ rtk->ap_power_state, new_state);
+ rtk->ap_power_state = new_state;
+
+ complete_all(&rtk->ap_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_MGMT_HELLO:
+ apple_rtkit_management_rx_hello(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_EPMAP:
+ apple_rtkit_management_rx_epmap(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_iop_pwr_ack(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_ap_pwr_ack(rtk, msg);
+ break;
+ default:
+ dev_warn(
+ rtk->dev,
+ "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n",
+ msg, type);
+ }
+}
+
+static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *buffer,
+ u8 ep, u64 msg)
+{
+ size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
+ u64 reply;
+ int err;
+
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->is_mapped = false;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ buffer->size = n_4kpages << 12;
+
+ dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
+ buffer->size, &buffer->iova);
+
+ if (buffer->iova &&
+ (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (rtk->ops->shmem_setup) {
+ err = rtk->ops->shmem_setup(rtk->cookie, buffer);
+ if (err)
+ goto error;
+ } else {
+ buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size,
+ &buffer->iova, GFP_KERNEL);
+ if (!buffer->buffer) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ if (!buffer->is_mapped) {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ apple_rtkit_send_message(rtk, ep, reply, NULL, false);
+ }
+
+ return 0;
+
+error:
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->iova = 0;
+ buffer->size = 0;
+ buffer->is_mapped = false;
+ return err;
+}
+
+static void apple_rtkit_free_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *bfr)
+{
+ if (bfr->size == 0)
+ return;
+
+ if (rtk->ops->shmem_destroy)
+ rtk->ops->shmem_destroy(rtk->cookie, bfr);
+ else if (bfr->buffer)
+ dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova);
+
+ bfr->buffer = NULL;
+ bfr->iomem = NULL;
+ bfr->iova = 0;
+ bfr->size = 0;
+ bfr->is_mapped = false;
+}
+
+static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
+ struct apple_rtkit_shmem *bfr, size_t offset,
+ size_t len)
+{
+ if (bfr->iomem)
+ memcpy_fromio(dst, bfr->iomem + offset, len);
+ else
+ memcpy(dst, bfr->buffer + offset, len);
+}
+
+static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+ u8 *bfr;
+
+ if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
+ dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
+ msg);
+ return;
+ }
+
+ if (!rtk->crashlog_buffer.size) {
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer,
+ APPLE_RTKIT_EP_CRASHLOG, msg);
+ return;
+ }
+
+ dev_err(rtk->dev, "RTKit: co-processor has crashed\n");
+
+ /*
+ * create a shadow copy here to make sure the co-processor isn't able
+ * to change the log while we're dumping it. this also ensures
+ * the buffer is in normal memory and not iomem for e.g. the SMC
+ */
+ bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL);
+ if (bfr) {
+ apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
+ rtk->crashlog_buffer.size);
+ apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
+ kfree(bfr);
+ } else {
+ dev_err(rtk->dev,
+ "RTKit: Couldn't allocate crashlog shadow buffer\n");
+ }
+
+ rtk->crashed = true;
+ if (rtk->ops->crashed)
+ rtk->ops->crashed(rtk->cookie);
+}
+
+static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer,
+ APPLE_RTKIT_EP_IOREPORT, msg);
+ break;
+ /* unknown, must be ACKed or the co-processor will hang */
+ case 0x8:
+ case 0xc:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg,
+ NULL, false);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg);
+ rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg);
+
+ rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL);
+
+ dev_dbg(rtk->dev,
+ "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n",
+ rtk->syslog_n_entries, rtk->syslog_msg_size);
+}
+
+static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 idx = msg & 0xff;
+ char log_context[24];
+ size_t entry_size = 0x20 + rtk->syslog_msg_size;
+
+ if (!rtk->syslog_msg_buffer) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_msg_buffer\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.size) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but syslog_buffer.size is zero\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n");
+ goto done;
+ }
+ if (idx > rtk->syslog_n_entries) {
+ dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n",
+ idx);
+ goto done;
+ }
+
+ apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer,
+ idx * entry_size + 8, sizeof(log_context));
+ apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer,
+ idx * entry_size + 8 + sizeof(log_context),
+ rtk->syslog_msg_size);
+
+ log_context[sizeof(log_context) - 1] = 0;
+ rtk->syslog_msg_buffer[rtk->syslog_msg_size - 1] = 0;
+ dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context,
+ rtk->syslog_msg_buffer);
+
+done:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false);
+}
+
+static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer,
+ APPLE_RTKIT_EP_SYSLOG, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_INIT:
+ apple_rtkit_syslog_rx_init(rtk, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_LOG:
+ apple_rtkit_syslog_rx_log(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 ack;
+
+ dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
+ ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
+}
+
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_OSLOG_INIT:
+ apple_rtkit_oslog_rx_init(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ }
+}
+
+static void apple_rtkit_rx_work(struct work_struct *work)
+{
+ struct apple_rtkit_rx_work *rtk_work =
+ container_of(work, struct apple_rtkit_rx_work, work);
+ struct apple_rtkit *rtk = rtk_work->rtk;
+
+ switch (rtk_work->ep) {
+ case APPLE_RTKIT_EP_MGMT:
+ apple_rtkit_management_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_CRASHLOG:
+ apple_rtkit_crashlog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_SYSLOG:
+ apple_rtkit_syslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_IOREPORT:
+ apple_rtkit_ioreport_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_OSLOG:
+ apple_rtkit_oslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff:
+ if (rtk->ops->recv_message)
+ rtk->ops->recv_message(rtk->cookie, rtk_work->ep,
+ rtk_work->msg);
+ else
+ dev_warn(
+ rtk->dev,
+ "Received unexpected message to EP%02d: %llx\n",
+ rtk_work->ep, rtk_work->msg);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: message to unknown endpoint %02x: %llx\n",
+ rtk_work->ep, rtk_work->msg);
+ }
+
+ kfree(rtk_work);
+}
+
+static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+{
+ struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
+ struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit_rx_work *work;
+ u8 ep = msg->msg1;
+
+ /*
+ * The message was read from a MMIO FIFO and we have to make
+ * sure all reads from buffers sent with that message happen
+ * afterwards.
+ */
+ dma_rmb();
+
+ if (!test_bit(ep, rtk->endpoints))
+ dev_warn(rtk->dev,
+ "RTKit: Message to undiscovered endpoint 0x%02x\n",
+ ep);
+
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ rtk->ops->recv_message_early &&
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ return;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->rtk = rtk;
+ work->ep = ep;
+ work->msg = msg->msg0;
+ INIT_WORK(&work->work, apple_rtkit_rx_work);
+ queue_work(rtk->wq, &work->work);
+}
+
+static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct apple_rtkit_msg *msg =
+ container_of(mssg, struct apple_rtkit_msg, mbox_msg);
+
+ if (r == -ETIME)
+ return;
+
+ if (msg->completion)
+ complete(msg->completion);
+ kfree(msg);
+}
+
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic)
+{
+ struct apple_rtkit_msg *msg;
+ int ret;
+ gfp_t flags;
+
+ if (rtk->crashed)
+ return -EINVAL;
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ if (atomic)
+ flags = GFP_ATOMIC;
+ else
+ flags = GFP_KERNEL;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->mbox_msg.msg0 = message;
+ msg->mbox_msg.msg1 = ep;
+ msg->completion = completion;
+
+ /*
+ * The message will be sent with a MMIO write. We need the barrier
+ * here to ensure any previous writes to buffers are visible to the
+ * device before that MMIO write happens.
+ */
+ dma_wmb();
+
+ ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
+ if (ret < 0) {
+ kfree(msg);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
+
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+ unsigned long timeout, bool atomic)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+ long t;
+
+ ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
+ if (ret < 0)
+ return ret;
+
+ if (atomic) {
+ ret = mbox_flush(rtk->mbox_chan, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (try_wait_for_completion(&completion))
+ return 0;
+
+ return -ETIME;
+ } else {
+ t = wait_for_completion_interruptible_timeout(
+ &completion, msecs_to_jiffies(timeout));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
+{
+ u64 msg;
+
+ if (!test_bit(endpoint, rtk->endpoints))
+ return -EINVAL;
+ if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint);
+ msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG;
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
+
+static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
+{
+ if (rtk->mbox_name)
+ rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
+ rtk->mbox_name);
+ else
+ rtk->mbox_chan =
+ mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
+
+ if (IS_ERR(rtk->mbox_chan))
+ return PTR_ERR(rtk->mbox_chan);
+ return 0;
+}
+
+static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ rtk = kzalloc(sizeof(*rtk), GFP_KERNEL);
+ if (!rtk)
+ return ERR_PTR(-ENOMEM);
+
+ rtk->dev = dev;
+ rtk->cookie = cookie;
+ rtk->ops = ops;
+
+ init_completion(&rtk->epmap_completion);
+ init_completion(&rtk->iop_pwr_ack_completion);
+ init_completion(&rtk->ap_pwr_ack_completion);
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ rtk->mbox_name = mbox_name;
+ rtk->mbox_idx = mbox_idx;
+ rtk->mbox_cl.dev = dev;
+ rtk->mbox_cl.tx_block = false;
+ rtk->mbox_cl.knows_txdone = false;
+ rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
+ rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ dev_name(rtk->dev));
+ if (!rtk->wq) {
+ ret = -ENOMEM;
+ goto free_rtk;
+ }
+
+ ret = apple_rtkit_request_mbox_chan(rtk);
+ if (ret)
+ goto destroy_wq;
+
+ return rtk;
+
+destroy_wq:
+ destroy_workqueue(rtk->wq);
+free_rtk:
+ kfree(rtk);
+ return ERR_PTR(ret);
+}
+
+static int apple_rtkit_wait_for_completion(struct completion *c)
+{
+ long t;
+
+ t = wait_for_completion_interruptible_timeout(c,
+ msecs_to_jiffies(1000));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ else
+ return 0;
+}
+
+int apple_rtkit_reinit(struct apple_rtkit *rtk)
+{
+ /* make sure we don't handle any messages while reinitializing */
+ mbox_free_channel(rtk->mbox_chan);
+ flush_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+
+ rtk->syslog_msg_buffer = NULL;
+ rtk->syslog_n_entries = 0;
+ rtk->syslog_msg_size = 0;
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ reinit_completion(&rtk->epmap_completion);
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ rtk->crashed = false;
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+
+ return apple_rtkit_request_mbox_chan(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
+
+static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->ap_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->iop_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+int apple_rtkit_boot(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ if (apple_rtkit_is_running(rtk))
+ return 0;
+ if (rtk->crashed)
+ return -EINVAL;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion);
+ if (ret)
+ return ret;
+ if (rtk->boot_result)
+ return rtk->boot_result;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_boot);
+
+int apple_rtkit_shutdown(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ /* if OFF is used here the co-processor will not wake up again */
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_reinit(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
+
+int apple_rtkit_quiesce(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_reinit(rtk);
+ if (ret)
+ return ret;
+
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
+
+int apple_rtkit_wake(struct apple_rtkit *rtk)
+{
+ u64 msg;
+
+ if (apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ /*
+ * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
+ * will wait for the completion anyway.
+ */
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ return apple_rtkit_boot(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+
+static void apple_rtkit_free(struct apple_rtkit *rtk)
+{
+ mbox_free_channel(rtk->mbox_chan);
+ destroy_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+ kfree(rtk);
+}
+
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops);
+ if (IS_ERR(rtk))
+ return rtk;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free,
+ rtk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return rtk;
+}
+EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple RTKit driver");
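
To put the new library in context, here is a minimal, hypothetical consumer sketch (not part of this series). The callback signatures are inferred from how rtkit.c invokes the ops above, and the mailbox name ("mbox") and the endpoint number are made-up placeholders, not values defined by this patch.

/*
 * Hypothetical RTKit consumer sketch. Assumes struct apple_rtkit_ops
 * callbacks take (void *cookie, ...) as rtkit.c calls them; names and
 * numbers below are illustrative only.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/apple/rtkit.h>

#define MY_APP_ENDPOINT 0x20	/* application endpoints start at 0x20 */

static void my_coproc_crashed(void *cookie)
{
	struct platform_device *pdev = cookie;

	dev_err(&pdev->dev, "co-processor crashed, needs a full reset\n");
}

static void my_coproc_recv(void *cookie, u8 endpoint, u64 message)
{
	struct platform_device *pdev = cookie;

	dev_info(&pdev->dev, "EP 0x%02x sent 0x%016llx\n", endpoint, message);
}

static const struct apple_rtkit_ops my_coproc_ops = {
	.crashed = my_coproc_crashed,
	.recv_message = my_coproc_recv,
};

static int my_coproc_probe(struct platform_device *pdev)
{
	struct apple_rtkit *rtk;
	int ret;

	/* the cookie is passed back to the callbacks above */
	rtk = devm_apple_rtkit_init(&pdev->dev, pdev, "mbox", 0,
				    &my_coproc_ops);
	if (IS_ERR(rtk))
		return PTR_ERR(rtk);

	/* waits for the HELLO/EPMAP handshake and switches the AP state to ON */
	ret = apple_rtkit_boot(rtk);
	if (ret)
		return ret;

	/* start an application endpoint and send it a message */
	ret = apple_rtkit_start_ep(rtk, MY_APP_ENDPOINT);
	if (ret)
		return ret;

	return apple_rtkit_send_message(rtk, MY_APP_ENDPOINT, 0x42, NULL,
					false);
}
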
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
new file mode 100644
index 000000000000..83804b16ad03
--- /dev/null
+++ b/drivers/soc/apple/sart.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for some DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target these. Unlike a proper
+ * IOMMU no remapping can be done and special support in the
+ * consumer driver is required since not all DMA transactions of
+ * a single device are subject to SART filtering.
+ */
+
+#include <linux/soc/apple/sart.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_SART_MAX_ENTRIES 16
+
+/* This is probably a bitfield but the exact meaning of each bit is unknown. */
+#define APPLE_SART_FLAGS_ALLOW 0xff
+
+/* SARTv2 registers */
+#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24)
+#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0)
+#define APPLE_SART2_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0)
+
+#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART2_PADDR_SHIFT 12
+
+/* SARTv3 registers */
+#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
+
+#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART3_PADDR_SHIFT 12
+
+#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx))
+#define APPLE_SART3_SIZE_SHIFT 12
+#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+
+struct apple_sart_ops {
+ void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size);
+ void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted);
+ unsigned int size_shift;
+ unsigned int paddr_shift;
+ size_t size_max;
+};
+
+struct apple_sart {
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct apple_sart_ops *ops;
+
+ unsigned long protected_entries;
+ unsigned long used_entries;
+};
+
+static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT;
+}
+
+static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART2_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v2 = {
+ .get_entry = sart2_get_entry,
+ .set_entry = sart2_set_entry,
+ .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART2_PADDR_SHIFT,
+ .size_max = APPLE_SART2_CONFIG_SIZE_MAX,
+};
+
+static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index));
+ size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index));
+
+ *flags = readl(sart->regs + APPLE_SART3_CONFIG(index));
+ *size = size_ << APPLE_SART3_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT;
+}
+
+static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index));
+ writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index));
+ writel(flags, sart->regs + APPLE_SART3_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v3 = {
+ .get_entry = sart3_get_entry,
+ .set_entry = sart3_set_entry,
+ .size_shift = APPLE_SART3_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART3_PADDR_SHIFT,
+ .size_max = APPLE_SART3_SIZE_MAX,
+};
+
+static int apple_sart_probe(struct platform_device *pdev)
+{
+ int i;
+ struct apple_sart *sart;
+ struct device *dev = &pdev->dev;
+
+ sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL);
+ if (!sart)
+ return -ENOMEM;
+
+ sart->dev = dev;
+ sart->ops = of_device_get_match_data(dev);
+
+ sart->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sart->regs))
+ return PTR_ERR(sart->regs);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 flags;
+ size_t size;
+ phys_addr_t paddr;
+
+ sart->ops->get_entry(sart, i, &flags, &paddr, &size);
+
+ if (!flags)
+ continue;
+
+ dev_dbg(sart->dev,
+ "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n",
+ i, flags, &paddr, size);
+ set_bit(i, &sart->protected_entries);
+ }
+
+ platform_set_drvdata(pdev, sart);
+ return 0;
+}
+
+struct apple_sart *devm_apple_sart_get(struct device *dev)
+{
+ struct device_node *sart_node;
+ struct platform_device *sart_pdev;
+ struct apple_sart *sart;
+ int ret;
+
+ sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
+ if (!sart_node)
+ return ERR_PTR(-ENODEV);
+
+ sart_pdev = of_find_device_by_node(sart_node);
+ of_node_put(sart_node);
+
+ if (!sart_pdev)
+ return ERR_PTR(-ENODEV);
+
+ sart = dev_get_drvdata(&sart_pdev->dev);
+ if (!sart) {
+ put_device(&sart_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device,
+ &sart_pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ device_link_add(dev, &sart_pdev->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+ return sart;
+}
+EXPORT_SYMBOL_GPL(devm_apple_sart_get);
+
+static int sart_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr, size_t size)
+{
+ if (size & ((1 << sart->ops->size_shift) - 1))
+ return -EINVAL;
+ if (paddr & ((1 << sart->ops->paddr_shift) - 1))
+ return -EINVAL;
+
+ paddr >>= sart->ops->paddr_shift;
+ size >>= sart->ops->size_shift;
+
+ if (size > sart->ops->size_max)
+ return -EINVAL;
+
+ sart->ops->set_entry(sart, index, flags, paddr, size);
+ return 0;
+}
+
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i, ret;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+ if (test_and_set_bit(i, &sart->used_entries))
+ continue;
+
+ ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ size);
+ if (ret) {
+ dev_dbg(sart->dev,
+ "unable to set entry %d to [%pa, 0x%zx]\n",
+ i, &paddr, size);
+ clear_bit(i, &sart->used_entries);
+ return ret;
+ }
+
+ dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size,
+ i);
+ return 0;
+ }
+
+ dev_warn(sart->dev,
+ "no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n",
+ &paddr, size);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region);
+
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i;
+
+ dev_dbg(sart->dev,
+ "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n",
+ &paddr, size);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 eflags;
+ size_t esize;
+ phys_addr_t epaddr;
+
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize);
+
+ if (epaddr != paddr || esize != size)
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+
+ clear_bit(i, &sart->used_entries);
+ dev_dbg(sart->dev, "cleared entry %d\n", i);
+ return 0;
+ }
+
+ dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n",
+ &paddr, size);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region);
+
+static void apple_sart_shutdown(struct platform_device *pdev)
+{
+ struct apple_sart *sart = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+ }
+}
+
+static const struct of_device_id apple_sart_of_match[] = {
+ {
+ .compatible = "apple,t6000-sart",
+ .data = &sart_ops_v3,
+ },
+ {
+ .compatible = "apple,t8103-sart",
+ .data = &sart_ops_v2,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_sart_of_match);
+
+static struct platform_driver apple_sart_driver = {
+ .driver = {
+ .name = "apple-sart",
+ .of_match_table = apple_sart_of_match,
+ },
+ .probe = apple_sart_probe,
+ .shutdown = apple_sart_shutdown,
+};
+module_platform_driver(apple_sart_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple SART driver");
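
A hypothetical consumer sketch (not part of this series) of the SART API added above, e.g. for a co-processor driver that shares DMA buffers: it assumes the consumer's DT node carries the "apple,sart" phandle and that, with no IOMMU in the path, the DMA address returned by dma_alloc_coherent() is the physical address the SART filters on.

/*
 * Hypothetical SART consumer sketch; buffer size and flow are
 * illustrative only.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/soc/apple/sart.h>

static int my_client_setup_shmem(struct platform_device *pdev)
{
	struct apple_sart *sart;
	dma_addr_t iova;
	void *buf;
	int ret;

	/* returns -EPROBE_DEFER until the SART device has probed */
	sart = devm_apple_sart_get(&pdev->dev);
	if (IS_ERR(sart))
		return PTR_ERR(sart);

	buf = dma_alloc_coherent(&pdev->dev, SZ_16K, &iova, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* allow the co-processor to DMA to this region */
	ret = apple_sart_add_allowed_region(sart, iova, SZ_16K);
	if (ret) {
		dma_free_coherent(&pdev->dev, SZ_16K, buf, iova);
		return ret;
	}

	/*
	 * ... hand the buffer to the co-processor; call
	 * apple_sart_remove_allowed_region() before freeing it ...
	 */
	return 0;
}
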
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 7bbe46ea5f94..9407cac47fdb 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -312,6 +312,9 @@ static int bcm_pmb_probe(struct platform_device *pdev)
for (e = table; e->name; e++) {
struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
pd->pmb = pmb;
pd->data = e;
pd->genpd.name = e->name;
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 8a707077914c..63cd29f6d4d2 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
+obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3cb123016b3e..85aa86e1338a 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -21,10 +21,12 @@
#include <dt-bindings/power/imx8mq-power.h>
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
+#include <dt-bindings/power/imx8mp-power.h>
#define GPC_LPCR_A_CORE_BSC 0x000
#define GPC_PGC_CPU_MAPPING 0x0ec
+#define IMX8MP_GPC_PGC_CPU_MAPPING 0x1cc
#define IMX7_USB_HSIC_PHY_A_CORE_DOMAIN BIT(6)
#define IMX7_USB_OTG2_PHY_A_CORE_DOMAIN BIT(5)
@@ -65,6 +67,29 @@
#define IMX8MN_OTG1_A53_DOMAIN BIT(4)
#define IMX8MN_MIPI_A53_DOMAIN BIT(2)
+#define IMX8MP_MEDIA_ISPDWP_A53_DOMAIN BIT(20)
+#define IMX8MP_HSIOMIX_A53_DOMAIN BIT(19)
+#define IMX8MP_MIPI_PHY2_A53_DOMAIN BIT(18)
+#define IMX8MP_HDMI_PHY_A53_DOMAIN BIT(17)
+#define IMX8MP_HDMIMIX_A53_DOMAIN BIT(16)
+#define IMX8MP_VPU_VC8000E_A53_DOMAIN BIT(15)
+#define IMX8MP_VPU_G2_A53_DOMAIN BIT(14)
+#define IMX8MP_VPU_G1_A53_DOMAIN BIT(13)
+#define IMX8MP_MEDIAMIX_A53_DOMAIN BIT(12)
+#define IMX8MP_GPU3D_A53_DOMAIN BIT(11)
+#define IMX8MP_VPUMIX_A53_DOMAIN BIT(10)
+#define IMX8MP_GPUMIX_A53_DOMAIN BIT(9)
+#define IMX8MP_GPU2D_A53_DOMAIN BIT(8)
+#define IMX8MP_AUDIOMIX_A53_DOMAIN BIT(7)
+#define IMX8MP_MLMIX_A53_DOMAIN BIT(6)
+#define IMX8MP_USB2_PHY_A53_DOMAIN BIT(5)
+#define IMX8MP_USB1_PHY_A53_DOMAIN BIT(4)
+#define IMX8MP_PCIE_PHY_A53_DOMAIN BIT(3)
+#define IMX8MP_MIPI_PHY1_A53_DOMAIN BIT(2)
+
+#define IMX8MP_GPC_PU_PGC_SW_PUP_REQ 0x0d8
+#define IMX8MP_GPC_PU_PGC_SW_PDN_REQ 0x0e4
+
#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104
@@ -107,8 +132,30 @@
#define IMX8MN_OTG1_SW_Pxx_REQ BIT(2)
#define IMX8MN_MIPI_SW_Pxx_REQ BIT(0)
+#define IMX8MP_DDRMIX_Pxx_REQ BIT(19)
+#define IMX8MP_MEDIA_ISP_DWP_Pxx_REQ BIT(18)
+#define IMX8MP_HSIOMIX_Pxx_REQ BIT(17)
+#define IMX8MP_MIPI_PHY2_Pxx_REQ BIT(16)
+#define IMX8MP_HDMI_PHY_Pxx_REQ BIT(15)
+#define IMX8MP_HDMIMIX_Pxx_REQ BIT(14)
+#define IMX8MP_VPU_VC8K_Pxx_REQ BIT(13)
+#define IMX8MP_VPU_G2_Pxx_REQ BIT(12)
+#define IMX8MP_VPU_G1_Pxx_REQ BIT(11)
+#define IMX8MP_MEDIMIX_Pxx_REQ BIT(10)
+#define IMX8MP_GPU_3D_Pxx_REQ BIT(9)
+#define IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ BIT(8)
+#define IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ BIT(7)
+#define IMX8MP_GPU_2D_Pxx_REQ BIT(6)
+#define IMX8MP_AUDIOMIX_Pxx_REQ BIT(5)
+#define IMX8MP_MLMIX_Pxx_REQ BIT(4)
+#define IMX8MP_USB2_PHY_Pxx_REQ BIT(3)
+#define IMX8MP_USB1_PHY_Pxx_REQ BIT(2)
+#define IMX8MP_PCIE_PHY_SW_Pxx_REQ BIT(1)
+#define IMX8MP_MIPI_PHY1_SW_Pxx_REQ BIT(0)
+
#define GPC_M4_PU_PDN_FLG 0x1bc
+#define IMX8MP_GPC_PU_PWRHSK 0x190
#define GPC_PU_PWRHSK 0x1fc
#define IMX8M_GPU_HSK_PWRDNACKN BIT(26)
@@ -118,7 +165,6 @@
#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
-
#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29)
#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28))
#define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26)
@@ -137,6 +183,21 @@
#define IMX8MN_DISPMIX_HSK_PWRDNREQN BIT(7)
#define IMX8MN_HSIO_HSK_PWRDNREQN BIT(5)
+#define IMX8MP_MEDIAMIX_PWRDNACKN BIT(30)
+#define IMX8MP_HDMIMIX_PWRDNACKN BIT(29)
+#define IMX8MP_HSIOMIX_PWRDNACKN BIT(28)
+#define IMX8MP_VPUMIX_PWRDNACKN BIT(26)
+#define IMX8MP_GPUMIX_PWRDNACKN BIT(25)
+#define IMX8MP_MLMIX_PWRDNACKN (BIT(23) | BIT(24))
+#define IMX8MP_AUDIOMIX_PWRDNACKN (BIT(20) | BIT(31))
+#define IMX8MP_MEDIAMIX_PWRDNREQN BIT(14)
+#define IMX8MP_HDMIMIX_PWRDNREQN BIT(13)
+#define IMX8MP_HSIOMIX_PWRDNREQN BIT(12)
+#define IMX8MP_VPUMIX_PWRDNREQN BIT(10)
+#define IMX8MP_GPUMIX_PWRDNREQN BIT(9)
+#define IMX8MP_MLMIX_PWRDNREQN (BIT(7) | BIT(8))
+#define IMX8MP_AUDIOMIX_PWRDNREQN (BIT(4) | BIT(15))
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -179,14 +240,44 @@
#define IMX8MN_PGC_GPUMIX 23
#define IMX8MN_PGC_DISPMIX 26
+#define IMX8MP_PGC_NOC 9
+#define IMX8MP_PGC_MIPI1 12
+#define IMX8MP_PGC_PCIE 13
+#define IMX8MP_PGC_USB1 14
+#define IMX8MP_PGC_USB2 15
+#define IMX8MP_PGC_MLMIX 16
+#define IMX8MP_PGC_AUDIOMIX 17
+#define IMX8MP_PGC_GPU2D 18
+#define IMX8MP_PGC_GPUMIX 19
+#define IMX8MP_PGC_VPUMIX 20
+#define IMX8MP_PGC_GPU3D 21
+#define IMX8MP_PGC_MEDIAMIX 22
+#define IMX8MP_PGC_VPU_G1 23
+#define IMX8MP_PGC_VPU_G2 24
+#define IMX8MP_PGC_VPU_VC8000E 25
+#define IMX8MP_PGC_HDMIMIX 26
+#define IMX8MP_PGC_HDMI 27
+#define IMX8MP_PGC_MIPI2 28
+#define IMX8MP_PGC_HSIOMIX 29
+#define IMX8MP_PGC_MEDIA_ISP_DWP 30
+#define IMX8MP_PGC_DDRMIX 31
+
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
#define GPC_PGC_CTRL_PCR BIT(0)
+struct imx_pgc_regs {
+ u16 map;
+ u16 pup;
+ u16 pdn;
+ u16 hsk;
+};
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
+ const struct imx_pgc_regs *regs;
struct regulator *regulator;
struct reset_control *reset;
struct clk_bulk_data *clks;
@@ -204,12 +295,16 @@ struct imx_pgc_domain {
const int voltage;
const bool keep_clocks;
struct device *dev;
+
+ unsigned int pgc_sw_pup_reg;
+ unsigned int pgc_sw_pdn_reg;
};
struct imx_pgc_domain_data {
const struct imx_pgc_domain *domains;
size_t domains_num;
const struct regmap_access_table *reg_access_table;
+ const struct imx_pgc_regs *pgc_regs;
};
static inline struct imx_pgc_domain *
@@ -249,14 +344,14 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
if (domain->bits.pxx) {
/* request the domain to power up */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
+ regmap_update_bits(domain->regmap, domain->regs->pup,
domain->bits.pxx, domain->bits.pxx);
/*
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PUP_REQ, reg_val,
+ domain->regs->pup, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
@@ -278,11 +373,11 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
/* request the ADB400 to power up */
if (domain->bits.hskreq) {
- regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+ regmap_update_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq, domain->bits.hskreq);
/*
- * ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK, reg_val,
+ * ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk, reg_val,
* (reg_val & domain->bits.hskack), 0,
* USEC_PER_MSEC);
* Technically we need the commented code to wait handshake. But that needs
@@ -329,10 +424,10 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
/* request the ADB400 to power down */
if (domain->bits.hskreq) {
- regmap_clear_bits(domain->regmap, GPC_PU_PWRHSK,
+ regmap_clear_bits(domain->regmap, domain->regs->hsk,
domain->bits.hskreq);
- ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK,
+ ret = regmap_read_poll_timeout(domain->regmap, domain->regs->hsk,
reg_val,
!(reg_val & domain->bits.hskack),
0, USEC_PER_MSEC);
@@ -350,14 +445,14 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
}
/* request the domain to power down */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
+ regmap_update_bits(domain->regmap, domain->regs->pdn,
domain->bits.pxx, domain->bits.pxx);
/*
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PDN_REQ, reg_val,
+ domain->regs->pdn, reg_val,
!(reg_val & domain->bits.pxx),
0, USEC_PER_MSEC);
if (ret) {
@@ -442,10 +537,18 @@ static const struct regmap_access_table imx7_access_table = {
.n_yes_ranges = ARRAY_SIZE(imx7_yes_ranges),
};
+static const struct imx_pgc_regs imx7_pgc_regs = {
+ .map = GPC_PGC_CPU_MAPPING,
+ .pup = GPC_PU_PGC_SW_PUP_REQ,
+ .pdn = GPC_PU_PGC_SW_PDN_REQ,
+ .hsk = GPC_PU_PWRHSK,
+};
+
static const struct imx_pgc_domain_data imx7_pgc_domain_data = {
.domains = imx7_pgc_domains,
.domains_num = ARRAY_SIZE(imx7_pgc_domains),
.reg_access_table = &imx7_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8m_pgc_domains[] = {
@@ -614,6 +717,7 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.domains = imx8m_pgc_domains,
.domains_num = ARRAY_SIZE(imx8m_pgc_domains),
.reg_access_table = &imx8m_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
@@ -804,6 +908,304 @@ static const struct imx_pgc_domain_data imx8mm_pgc_domain_data = {
.domains = imx8mm_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mm_pgc_domains),
.reg_access_table = &imx8mm_access_table,
+ .pgc_regs = &imx7_pgc_regs,
+};
+
+static const struct imx_pgc_domain imx8mp_pgc_domains[] = {
+ [IMX8MP_POWER_DOMAIN_MIPI_PHY1] = {
+ .genpd = {
+ .name = "mipi-phy1",
+ },
+ .bits = {
+ .pxx = IMX8MP_MIPI_PHY1_SW_Pxx_REQ,
+ .map = IMX8MP_MIPI_PHY1_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MIPI1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_PCIE_PHY] = {
+ .genpd = {
+ .name = "pcie-phy1",
+ },
+ .bits = {
+ .pxx = IMX8MP_PCIE_PHY_SW_Pxx_REQ,
+ .map = IMX8MP_PCIE_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_PCIE),
+ },
+
+ [IMX8MP_POWER_DOMAIN_USB1_PHY] = {
+ .genpd = {
+ .name = "usb-otg1",
+ },
+ .bits = {
+ .pxx = IMX8MP_USB1_PHY_Pxx_REQ,
+ .map = IMX8MP_USB1_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_USB1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_USB2_PHY] = {
+ .genpd = {
+ .name = "usb-otg2",
+ },
+ .bits = {
+ .pxx = IMX8MP_USB2_PHY_Pxx_REQ,
+ .map = IMX8MP_USB2_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_USB2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MLMIX] = {
+ .genpd = {
+ .name = "mlmix",
+ },
+ .bits = {
+ .pxx = IMX8MP_MLMIX_Pxx_REQ,
+ .map = IMX8MP_MLMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_MLMIX_PWRDNREQN,
+ .hskack = IMX8MP_MLMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MLMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_AUDIOMIX] = {
+ .genpd = {
+ .name = "audiomix",
+ },
+ .bits = {
+ .pxx = IMX8MP_AUDIOMIX_Pxx_REQ,
+ .map = IMX8MP_AUDIOMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_AUDIOMIX_PWRDNREQN,
+ .hskack = IMX8MP_AUDIOMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_AUDIOMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPU2D] = {
+ .genpd = {
+ .name = "gpu2d",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_2D_Pxx_REQ,
+ .map = IMX8MP_GPU2D_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPU2D),
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPUMIX] = {
+ .genpd = {
+ .name = "gpumix",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_SHARE_LOGIC_Pxx_REQ,
+ .map = IMX8MP_GPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_GPUMIX_PWRDNREQN,
+ .hskack = IMX8MP_GPUMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPUMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPUMIX] = {
+ .genpd = {
+ .name = "vpumix",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_MIX_SHARE_LOGIC_Pxx_REQ,
+ .map = IMX8MP_VPUMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_VPUMIX_PWRDNREQN,
+ .hskack = IMX8MP_VPUMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPUMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_GPU3D] = {
+ .genpd = {
+ .name = "gpu3d",
+ },
+ .bits = {
+ .pxx = IMX8MP_GPU_3D_Pxx_REQ,
+ .map = IMX8MP_GPU3D_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_GPU3D),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MEDIAMIX] = {
+ .genpd = {
+ .name = "mediamix",
+ },
+ .bits = {
+ .pxx = IMX8MP_MEDIMIX_Pxx_REQ,
+ .map = IMX8MP_MEDIAMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_MEDIAMIX_PWRDNREQN,
+ .hskack = IMX8MP_MEDIAMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MEDIAMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_G1] = {
+ .genpd = {
+ .name = "vpu-g1",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_G1_Pxx_REQ,
+ .map = IMX8MP_VPU_G1_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_G1),
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_G2] = {
+ .genpd = {
+ .name = "vpu-g2",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_G2_Pxx_REQ,
+ .map = IMX8MP_VPU_G2_A53_DOMAIN
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_G2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_VPU_VC8000E] = {
+ .genpd = {
+ .name = "vpu-h1",
+ },
+ .bits = {
+ .pxx = IMX8MP_VPU_VC8K_Pxx_REQ,
+ .map = IMX8MP_VPU_VC8000E_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_VPU_VC8000E),
+ },
+
+ [IMX8MP_POWER_DOMAIN_HDMIMIX] = {
+ .genpd = {
+ .name = "hdmimix",
+ },
+ .bits = {
+ .pxx = IMX8MP_HDMIMIX_Pxx_REQ,
+ .map = IMX8MP_HDMIMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_HDMIMIX_PWRDNREQN,
+ .hskack = IMX8MP_HDMIMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HDMIMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_HDMI_PHY] = {
+ .genpd = {
+ .name = "hdmi-phy",
+ },
+ .bits = {
+ .pxx = IMX8MP_HDMI_PHY_Pxx_REQ,
+ .map = IMX8MP_HDMI_PHY_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HDMI),
+ },
+
+ [IMX8MP_POWER_DOMAIN_MIPI_PHY2] = {
+ .genpd = {
+ .name = "mipi-phy2",
+ },
+ .bits = {
+ .pxx = IMX8MP_MIPI_PHY2_Pxx_REQ,
+ .map = IMX8MP_MIPI_PHY2_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MIPI2),
+ },
+
+ [IMX8MP_POWER_DOMAIN_HSIOMIX] = {
+ .genpd = {
+ .name = "hsiomix",
+ },
+ .bits = {
+ .pxx = IMX8MP_HSIOMIX_Pxx_REQ,
+ .map = IMX8MP_HSIOMIX_A53_DOMAIN,
+ .hskreq = IMX8MP_HSIOMIX_PWRDNREQN,
+ .hskack = IMX8MP_HSIOMIX_PWRDNACKN,
+ },
+ .pgc = BIT(IMX8MP_PGC_HSIOMIX),
+ .keep_clocks = true,
+ },
+
+ [IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP] = {
+ .genpd = {
+ .name = "mediamix-isp-dwp",
+ },
+ .bits = {
+ .pxx = IMX8MP_MEDIA_ISP_DWP_Pxx_REQ,
+ .map = IMX8MP_MEDIA_ISPDWP_A53_DOMAIN,
+ },
+ .pgc = BIT(IMX8MP_PGC_MEDIA_ISP_DWP),
+ },
+};
+
+static const struct regmap_range imx8mp_yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A_CORE_BSC,
+ IMX8MP_GPC_PGC_CPU_MAPPING),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_NOC),
+ GPC_PGC_SR(IMX8MP_PGC_NOC)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI1),
+ GPC_PGC_SR(IMX8MP_PGC_MIPI1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_PCIE),
+ GPC_PGC_SR(IMX8MP_PGC_PCIE)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB1),
+ GPC_PGC_SR(IMX8MP_PGC_USB1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_USB2),
+ GPC_PGC_SR(IMX8MP_PGC_USB2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MLMIX),
+ GPC_PGC_SR(IMX8MP_PGC_MLMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_AUDIOMIX),
+ GPC_PGC_SR(IMX8MP_PGC_AUDIOMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU2D),
+ GPC_PGC_SR(IMX8MP_PGC_GPU2D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPUMIX),
+ GPC_PGC_SR(IMX8MP_PGC_GPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPUMIX),
+ GPC_PGC_SR(IMX8MP_PGC_VPUMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_GPU3D),
+ GPC_PGC_SR(IMX8MP_PGC_GPU3D)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIAMIX),
+ GPC_PGC_SR(IMX8MP_PGC_MEDIAMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G1),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_G1)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_G2),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_G2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_VPU_VC8000E),
+ GPC_PGC_SR(IMX8MP_PGC_VPU_VC8000E)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMIMIX),
+ GPC_PGC_SR(IMX8MP_PGC_HDMIMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HDMI),
+ GPC_PGC_SR(IMX8MP_PGC_HDMI)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MIPI2),
+ GPC_PGC_SR(IMX8MP_PGC_MIPI2)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_HSIOMIX),
+ GPC_PGC_SR(IMX8MP_PGC_HSIOMIX)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_MEDIA_ISP_DWP),
+ GPC_PGC_SR(IMX8MP_PGC_MEDIA_ISP_DWP)),
+ regmap_reg_range(GPC_PGC_CTRL(IMX8MP_PGC_DDRMIX),
+ GPC_PGC_SR(IMX8MP_PGC_DDRMIX)),
+};
+
+static const struct regmap_access_table imx8mp_access_table = {
+ .yes_ranges = imx8mp_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx8mp_yes_ranges),
+};
+
+static const struct imx_pgc_regs imx8mp_pgc_regs = {
+ .map = IMX8MP_GPC_PGC_CPU_MAPPING,
+ .pup = IMX8MP_GPC_PU_PGC_SW_PUP_REQ,
+ .pdn = IMX8MP_GPC_PU_PGC_SW_PDN_REQ,
+ .hsk = IMX8MP_GPC_PU_PWRHSK,
+};
+
+static const struct imx_pgc_domain_data imx8mp_pgc_domain_data = {
+ .domains = imx8mp_pgc_domains,
+ .domains_num = ARRAY_SIZE(imx8mp_pgc_domains),
+ .reg_access_table = &imx8mp_access_table,
+ .pgc_regs = &imx8mp_pgc_regs,
};
static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
@@ -895,6 +1297,7 @@ static const struct imx_pgc_domain_data imx8mn_pgc_domain_data = {
.domains = imx8mn_pgc_domains,
.domains_num = ARRAY_SIZE(imx8mn_pgc_domains),
.reg_access_table = &imx8mn_access_table,
+ .pgc_regs = &imx7_pgc_regs,
};
static int imx_pgc_domain_probe(struct platform_device *pdev)
@@ -927,7 +1330,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
pm_runtime_enable(domain->dev);
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, domain->bits.map);
ret = pm_genpd_init(&domain->genpd, NULL, true);
@@ -953,7 +1356,7 @@ out_genpd_remove:
pm_genpd_remove(&domain->genpd);
out_domain_unmap:
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
@@ -968,7 +1371,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
pm_genpd_remove(&domain->genpd);
if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ regmap_update_bits(domain->regmap, domain->regs->map,
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
@@ -1099,6 +1502,8 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
+ domain->regs = domain_data->pgc_regs;
+
domain->genpd.power_on = imx_pgc_power_up;
domain->genpd.power_off = imx_pgc_power_down;
@@ -1120,6 +1525,7 @@ static const struct of_device_id imx_gpcv2_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, },
{ .compatible = "fsl,imx8mm-gpc", .data = &imx8mm_pgc_domain_data, },
{ .compatible = "fsl,imx8mn-gpc", .data = &imx8mn_pgc_domain_data, },
+ { .compatible = "fsl,imx8mp-gpc", .data = &imx8mp_pgc_domain_data, },
{ .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, },
{ }
};
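
For reference, a minimal sketch (not part of the patch) of how the new per-SoC pgc_regs indirection is consumed: i.MX7/8MM/8MN/8MQ all point at imx7_pgc_regs, i.MX8MP points at imx8mp_pgc_regs, and the power-sequencing paths read the offsets through domain->regs instead of fixed GPC_* defines. The helper name below is made up; only the struct fields visible in this hunk are assumed.

static int example_pgc_power_up(struct imx_pgc_domain *domain)
{
	/* map the domain to the A53 cluster using the SoC-specific offset */
	if (domain->bits.map)
		regmap_update_bits(domain->regmap, domain->regs->map,
				   domain->bits.map, domain->bits.map);

	/* request power-up through this SoC's SW_PUP_REQ register */
	return regmap_update_bits(domain->regmap, domain->regs->pup,
				  domain->bits.pxx, domain->bits.pxx);
}
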
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 122f9c884b38..7f49385ed2f8 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -15,11 +15,12 @@
#include <dt-bindings/power/imx8mm-power.h>
#include <dt-bindings/power/imx8mn-power.h>
+#include <dt-bindings/power/imx8mp-power.h>
#include <dt-bindings/power/imx8mq-power.h>
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
-#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
+#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano/Plus DISPLAY_BLK_CTRL only */
struct imx8m_blk_ctrl_domain;
@@ -41,7 +42,7 @@ struct imx8m_blk_ctrl_domain_data {
u32 clk_mask;
/*
- * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+ * i.MX8M Mini, Nano and Plus have a third DISPLAY_BLK_CTRL register
* which is used to control the reset for the MIPI Phy.
* Since it's only present in certain circumstances,
* an if-statement should be used before setting and clearing this
@@ -50,7 +51,7 @@ struct imx8m_blk_ctrl_domain_data {
u32 mipi_phy_rst_mask;
};
-#define DOMAIN_MAX_CLKS 3
+#define DOMAIN_MAX_CLKS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
@@ -241,6 +242,7 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
+ dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
@@ -590,6 +592,121 @@ static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
};
+static int imx8mp_media_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /* Enable bus clock and deassert bus reset */
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ if (action == GENPD_NOTIFY_ON)
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+/*
+ * From the i.MX 8M Plus Applications Processor Reference Manual, Rev. 1,
+ * sections 13.2.2 and 13.2.3. The isp-ahb and dwe clocks are not listed
+ * in Figure 13-5 (Media BLK_CTRL Clocks).
+ */
+static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[] = {
+ [IMX8MP_MEDIABLK_PD_MIPI_DSI_1] = {
+ .name = "mediablk-mipi-dsi-1",
+ .clk_names = (const char *[]){ "apb", "phy", },
+ .num_clks = 2,
+ .gpc_name = "mipi-dsi1",
+ .rst_mask = BIT(0) | BIT(1),
+ .clk_mask = BIT(0) | BIT(1),
+ .mipi_phy_rst_mask = BIT(17),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_CSI2_1] = {
+ .name = "mediablk-mipi-csi2-1",
+ .clk_names = (const char *[]){ "apb", "cam1" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi1",
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .mipi_phy_rst_mask = BIT(16),
+ },
+ [IMX8MP_MEDIABLK_PD_LCDIF_1] = {
+ .name = "mediablk-lcdif-1",
+ .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+ .num_clks = 3,
+ .gpc_name = "lcdif1",
+ .rst_mask = BIT(4) | BIT(5) | BIT(23),
+ .clk_mask = BIT(4) | BIT(5) | BIT(23),
+ },
+ [IMX8MP_MEDIABLK_PD_ISI] = {
+ .name = "mediablk-isi",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "isi",
+ .rst_mask = BIT(6) | BIT(7),
+ .clk_mask = BIT(6) | BIT(7),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_CSI2_2] = {
+ .name = "mediablk-mipi-csi2-2",
+ .clk_names = (const char *[]){ "apb", "cam2" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi2",
+ .rst_mask = BIT(9) | BIT(10),
+ .clk_mask = BIT(9) | BIT(10),
+ .mipi_phy_rst_mask = BIT(30),
+ },
+ [IMX8MP_MEDIABLK_PD_LCDIF_2] = {
+ .name = "mediablk-lcdif-2",
+ .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+ .num_clks = 3,
+ .gpc_name = "lcdif2",
+ .rst_mask = BIT(11) | BIT(12) | BIT(24),
+ .clk_mask = BIT(11) | BIT(12) | BIT(24),
+ },
+ [IMX8MP_MEDIABLK_PD_ISP] = {
+ .name = "mediablk-isp",
+ .clk_names = (const char *[]){ "isp", "axi", "apb" },
+ .num_clks = 3,
+ .gpc_name = "isp",
+ .rst_mask = BIT(16) | BIT(17) | BIT(18),
+ .clk_mask = BIT(16) | BIT(17) | BIT(18),
+ },
+ [IMX8MP_MEDIABLK_PD_DWE] = {
+ .name = "mediablk-dwe",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "dwe",
+ .rst_mask = BIT(19) | BIT(20) | BIT(21),
+ .clk_mask = BIT(19) | BIT(20) | BIT(21),
+ },
+ [IMX8MP_MEDIABLK_PD_MIPI_DSI_2] = {
+ .name = "mediablk-mipi-dsi-2",
+ .clk_names = (const char *[]){ "phy", },
+ .num_clks = 1,
+ .gpc_name = "mipi-dsi2",
+ .rst_mask = BIT(22),
+ .clk_mask = BIT(22),
+ .mipi_phy_rst_mask = BIT(29),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mp_media_blk_ctl_dev_data = {
+ .max_reg = 0x138,
+ .power_notifier_fn = imx8mp_media_power_notifier,
+ .domains = imx8mp_media_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_media_blk_ctl_domain_data),
+};
+
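
As the comment near the top of this file notes, the MIPI PHY reset register only exists on the Mini/Nano/Plus display/media BLK_CTRL instances, so it has to be applied conditionally. A hedged sketch of that guard (the real check lives in imx8m_blk_ctrl_power_on()/power_off(), outside this hunk; the helper name is illustrative):

static void example_mipi_phy_rst_deassert(struct imx8m_blk_ctrl *bc,
					  const struct imx8m_blk_ctrl_domain_data *data)
{
	/* only domains that declare a mask touch BLK_MIPI_RESET_DIV */
	if (data->mipi_phy_rst_mask)
		regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV,
				data->mipi_phy_rst_mask);
}
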
static int imx8mq_vpu_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -663,6 +780,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
.compatible = "fsl,imx8mn-disp-blk-ctrl",
.data = &imx8mn_disp_blk_ctl_dev_data
}, {
+ .compatible = "fsl,imx8mp-media-blk-ctrl",
+ .data = &imx8mp_media_blk_ctl_dev_data
+ }, {
.compatible = "fsl,imx8mq-vpu-blk-ctrl",
.data = &imx8mq_vpu_blk_ctl_dev_data
}, {
diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
new file mode 100644
index 000000000000..4ca2ede6871b
--- /dev/null
+++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2022 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/power/imx8mp-power.h>
+
+#define GPR_REG0 0x0
+#define PCIE_CLOCK_MODULE_EN BIT(0)
+#define USB_CLOCK_MODULE_EN BIT(1)
+
+struct imx8mp_blk_ctrl_domain;
+
+struct imx8mp_blk_ctrl {
+ struct device *dev;
+ struct notifier_block power_nb;
+ struct device *bus_power_dev;
+ struct regmap *regmap;
+ struct imx8mp_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+	void (*power_off)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+	void (*power_on)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+};
+
+struct imx8mp_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ const char *gpc_name;
+};
+
+#define DOMAIN_MAX_CLKS 2
+
+struct imx8mp_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx8mp_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct device *power_dev;
+ struct imx8mp_blk_ctrl *bc;
+ int id;
+};
+
+struct imx8mp_blk_ctrl_data {
+ int max_reg;
+ notifier_fn_t power_notifier_fn;
+	void (*power_off)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+	void (*power_on)(struct imx8mp_blk_ctrl *bc, struct imx8mp_blk_ctrl_domain *domain);
+ const struct imx8mp_blk_ctrl_domain_data *domains;
+ int num_domains;
+};
+
+static inline struct imx8mp_blk_ctrl_domain *
+to_imx8mp_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx8mp_blk_ctrl_domain, genpd);
+}
+
+static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HSIOBLK_PD_USB:
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case IMX8MP_HSIOBLK_PD_PCIE:
+ regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HSIOBLK_PD_USB:
+ regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case IMX8MP_HSIOBLK_PD_PCIE:
+ regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx8mp_hsio_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
+ power_nb);
+ struct clk_bulk_data *usb_clk = bc->domains[IMX8MP_HSIOBLK_PD_USB].clks;
+ int num_clks = bc->domains[IMX8MP_HSIOBLK_PD_USB].data->num_clks;
+ int ret;
+
+ switch (action) {
+ case GENPD_NOTIFY_ON:
+ /*
+ * enable USB clock for a moment for the power-on ADB handshake
+ * to proceed
+ */
+ ret = clk_bulk_prepare_enable(num_clks, usb_clk);
+ if (ret)
+ return NOTIFY_BAD;
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+
+ udelay(5);
+
+ regmap_clear_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ clk_bulk_disable_unprepare(num_clks, usb_clk);
+ break;
+ case GENPD_NOTIFY_PRE_OFF:
+ /* enable USB clock for the power-down ADB handshake to work */
+ ret = clk_bulk_prepare_enable(num_clks, usb_clk);
+ if (ret)
+ return NOTIFY_BAD;
+
+ regmap_set_bits(bc->regmap, GPR_REG0, USB_CLOCK_MODULE_EN);
+ break;
+ case GENPD_NOTIFY_OFF:
+ clk_bulk_disable_unprepare(num_clks, usb_clk);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
+ [IMX8MP_HSIOBLK_PD_USB] = {
+ .name = "hsioblk-usb",
+ .clk_names = (const char *[]){ "usb" },
+ .num_clks = 1,
+ .gpc_name = "usb",
+ },
+ [IMX8MP_HSIOBLK_PD_USB_PHY1] = {
+ .name = "hsioblk-usb-phy1",
+ .gpc_name = "usb-phy1",
+ },
+ [IMX8MP_HSIOBLK_PD_USB_PHY2] = {
+ .name = "hsioblk-usb-phy2",
+ .gpc_name = "usb-phy2",
+ },
+ [IMX8MP_HSIOBLK_PD_PCIE] = {
+ .name = "hsioblk-pcie",
+ .clk_names = (const char *[]){ "pcie" },
+ .num_clks = 1,
+ .gpc_name = "pcie",
+ },
+ [IMX8MP_HSIOBLK_PD_PCIE_PHY] = {
+ .name = "hsioblk-pcie-phy",
+ .gpc_name = "pcie-phy",
+ },
+};
+
+static const struct imx8mp_blk_ctrl_data imx8mp_hsio_blk_ctl_dev_data = {
+ .max_reg = 0x24,
+ .power_on = imx8mp_hsio_blk_ctrl_power_on,
+ .power_off = imx8mp_hsio_blk_ctrl_power_off,
+ .power_notifier_fn = imx8mp_hsio_power_notifier,
+ .domains = imx8mp_hsio_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_hsio_domain_data),
+};
+
+#define HDMI_RTX_RESET_CTL0 0x20
+#define HDMI_RTX_CLK_CTL0 0x40
+#define HDMI_RTX_CLK_CTL1 0x50
+#define HDMI_RTX_CLK_CTL2 0x60
+#define HDMI_RTX_CLK_CTL3 0x70
+#define HDMI_RTX_CLK_CTL4 0x80
+#define HDMI_TX_CONTROL0 0x200
+
+static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HDMIBLK_PD_IRQSTEER:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
+ break;
+ case IMX8MP_HDMIBLK_PD_LCDIF:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(4) | BIT(5) | BIT(6));
+ break;
+ case IMX8MP_HDMIBLK_PD_PAI:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
+ break;
+ case IMX8MP_HDMIBLK_PD_PVI:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
+ break;
+ case IMX8MP_HDMIBLK_PD_TRNG:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(2) | BIT(4) | BIT(5));
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(18) | BIT(19) | BIT(20) | BIT(21));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(7) | BIT(10) | BIT(11));
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ break;
+ default:
+ break;
+ }
+}
+
+static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
+ struct imx8mp_blk_ctrl_domain *domain)
+{
+ switch (domain->id) {
+ case IMX8MP_HDMIBLK_PD_IRQSTEER:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(9));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(16));
+ break;
+ case IMX8MP_HDMIBLK_PD_LCDIF:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(4) | BIT(5) | BIT(6));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+ BIT(19) | BIT(20));
+ break;
+ case IMX8MP_HDMIBLK_PD_PAI:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(18));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(17));
+ break;
+ case IMX8MP_HDMIBLK_PD_PVI:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(22));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(28));
+ break;
+ case IMX8MP_HDMIBLK_PD_TRNG:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(20));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(27) | BIT(30));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX:
+ regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
+ BIT(7) | BIT(10) | BIT(11));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1,
+ BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) |
+ BIT(18) | BIT(19) | BIT(20) | BIT(21));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(2) | BIT(4) | BIT(5));
+ break;
+ case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+ regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
+ break;
+ default:
+ break;
+ }
+}
+
+static int imx8mp_hdmi_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8mp_blk_ctrl *bc = container_of(nb, struct imx8mp_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON)
+ return NOTIFY_OK;
+
+ /*
+	 * Contrary to other blk-ctrls, the reset and clock enable bits don't
+	 * clear when the power domain is powered down. To ensure proper reset
+	 * pulsing, first clear them all (resets asserted, clocks gated), then
+	 * enable the bus clocks and finally release the ADB reset.
+ */
+ regmap_write(bc->regmap, HDMI_RTX_RESET_CTL0, 0x0);
+ regmap_write(bc->regmap, HDMI_RTX_CLK_CTL0, 0x0);
+ regmap_write(bc->regmap, HDMI_RTX_CLK_CTL1, 0x0);
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
+ BIT(0) | BIT(1) | BIT(10));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(0));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
+ [IMX8MP_HDMIBLK_PD_IRQSTEER] = {
+ .name = "hdmiblk-irqsteer",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "irqsteer",
+ },
+ [IMX8MP_HDMIBLK_PD_LCDIF] = {
+ .name = "hdmiblk-lcdif",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "lcdif",
+ },
+ [IMX8MP_HDMIBLK_PD_PAI] = {
+ .name = "hdmiblk-pai",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "pai",
+ },
+ [IMX8MP_HDMIBLK_PD_PVI] = {
+ .name = "hdmiblk-pvi",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "pvi",
+ },
+ [IMX8MP_HDMIBLK_PD_TRNG] = {
+ .name = "hdmiblk-trng",
+ .clk_names = (const char *[]){ "apb" },
+ .num_clks = 1,
+ .gpc_name = "trng",
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX] = {
+ .name = "hdmiblk-hdmi-tx",
+ .clk_names = (const char *[]){ "apb", "ref_266m" },
+ .num_clks = 2,
+ .gpc_name = "hdmi-tx",
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
+ .name = "hdmiblk-hdmi-tx-phy",
+ .clk_names = (const char *[]){ "apb", "ref_24m" },
+ .num_clks = 2,
+ .gpc_name = "hdmi-tx-phy",
+ },
+};
+
+static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = {
+ .max_reg = 0x23c,
+ .power_on = imx8mp_hdmi_blk_ctrl_power_on,
+ .power_off = imx8mp_hdmi_blk_ctrl_power_off,
+ .power_notifier_fn = imx8mp_hdmi_power_notifier,
+ .domains = imx8mp_hdmi_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_hdmi_domain_data),
+};
+
+static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
+ const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
+ struct imx8mp_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ /* make sure bus domain is awake */
+ ret = pm_runtime_resume_and_get(bc->bus_power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up bus domain\n");
+ return ret;
+ }
+
+ /* enable upstream clocks */
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ goto bus_put;
+ }
+
+ /* domain specific blk-ctrl manipulation */
+ bc->power_on(bc, domain);
+
+ /* power up upstream GPC domain */
+ ret = pm_runtime_resume_and_get(domain->power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up peripheral domain\n");
+ goto clk_disable;
+ }
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ return 0;
+
+clk_disable:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+bus_put:
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx8mp_blk_ctrl_domain *domain = to_imx8mp_blk_ctrl_domain(genpd);
+ const struct imx8mp_blk_ctrl_domain_data *data = domain->data;
+ struct imx8mp_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* domain specific blk-ctrl manipulation */
+ bc->power_off(bc, domain);
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ /* power down upstream GPC domain */
+ pm_runtime_put(domain->power_dev);
+
+ /* allow bus domain to suspend */
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
+{
+ struct genpd_onecell_data *onecell_data = data;
+ unsigned int index = args->args[0];
+
+ if (args->args_count != 1 ||
+ index >= onecell_data->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return onecell_data->domains[index];
+}
+
+static struct lock_class_key blk_ctrl_genpd_lock_class;
+
+static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
+{
+ const struct imx8mp_blk_ctrl_data *bc_data;
+ struct device *dev = &pdev->dev;
+ struct imx8mp_blk_ctrl *bc;
+ void __iomem *base;
+ int num_domains, i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ bc_data = of_device_get_match_data(dev);
+ num_domains = bc_data->num_domains;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_config.max_register = bc_data->max_reg;
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, num_domains,
+ sizeof(struct imx8mp_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = num_domains;
+ bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
+ if (IS_ERR(bc->bus_power_dev))
+ return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
+ "failed to attach bus power domain\n");
+
+ bc->power_off = bc_data->power_off;
+ bc->power_on = bc_data->power_on;
+
+ for (i = 0; i < num_domains; i++) {
+ const struct imx8mp_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->power_dev =
+ dev_pm_domain_attach_by_name(dev, data->gpc_name);
+ if (IS_ERR(domain->power_dev)) {
+ dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ "failed to attach power domain %s\n",
+ data->gpc_name);
+ ret = PTR_ERR(domain->power_dev);
+ goto cleanup_pds;
+ }
+ dev_set_name(domain->power_dev, "%s", data->name);
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
+ domain->genpd.power_off = imx8mp_blk_ctrl_power_off;
+ domain->bc = bc;
+ domain->id = i;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_pm_domain_detach(domain->power_dev, true);
+ goto cleanup_pds;
+ }
+
+ /*
+ * We use runtime PM to trigger power on/off of the upstream GPC
+ * domain, as a strict hierarchical parent/child power domain
+ * setup doesn't allow us to meet the sequencing requirements.
+ * This means we have nested locking of genpd locks, without the
+ * nesting being visible at the genpd level, so we need a
+ * separate lock class to make lockdep aware of the fact that
+		 * these are separate domain locks that can be nested without a
+ * self-deadlock.
+ */
+ lockdep_set_class(&domain->genpd.mlock,
+ &blk_ctrl_genpd_lock_class);
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ bc->power_nb.notifier_call = bc_data->power_notifier_fn;
+ ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power notifier\n");
+ goto cleanup_provider;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_provider:
+ of_genpd_del_provider(dev->of_node);
+cleanup_pds:
+ for (i--; i >= 0; i--) {
+ pm_genpd_remove(&bc->domains[i].genpd);
+ dev_pm_domain_detach(bc->domains[i].power_dev, true);
+ }
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return ret;
+}
+
+static int imx8mp_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ dev_pm_domain_detach(domain->power_dev, true);
+ }
+
+ dev_pm_genpd_remove_notifier(bc->bus_power_dev);
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx8mp_blk_ctrl_suspend(struct device *dev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
+ int ret, i;
+
+ /*
+	 * This may look strange, but it is done so that the generic PM_SLEEP
+	 * code can power down our domains and, more importantly, power them
+	 * up again after resume, without tripping over our usage of runtime
+	 * PM to control the upstream GPC domains. Things happen in the right order
+ * in the system suspend/resume paths due to the device parent/child
+ * hierarchy.
+ */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ return ret;
+ }
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
+
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(domain->power_dev);
+ goto out_fail;
+ }
+ }
+
+ return 0;
+
+out_fail:
+ for (i--; i >= 0; i--)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8mp_blk_ctrl_resume(struct device *dev)
+{
+ struct imx8mp_blk_ctrl *bc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx8mp_blk_ctrl_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8mp_blk_ctrl_suspend,
+ imx8mp_blk_ctrl_resume)
+};
+
+static const struct of_device_id imx8mp_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx8mp-hsio-blk-ctrl",
+ .data = &imx8mp_hsio_blk_ctl_dev_data,
+ }, {
+ .compatible = "fsl,imx8mp-hdmi-blk-ctrl",
+ .data = &imx8mp_hdmi_blk_ctl_dev_data,
+ }, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_blk_ctrl_of_match);
+
+static struct platform_driver imx8mp_blk_ctrl_driver = {
+ .probe = imx8mp_blk_ctrl_probe,
+ .remove = imx8mp_blk_ctrl_remove,
+ .driver = {
+ .name = "imx8mp-blk-ctrl",
+ .pm = &imx8mp_blk_ctrl_pm_ops,
+ .of_match_table = imx8mp_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx8mp_blk_ctrl_driver);
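
For context, the nested runtime-PM scheme described in the probe comment means a consumer never talks to the GPC directly: placing a device in one of these blk-ctrl domains and using ordinary runtime PM is enough, since imx8mp_blk_ctrl_power_on() then runtime-resumes the upstream "bus" and peripheral GPC domains in the required order. A hypothetical consumer sketch (function name is a placeholder):

static int example_consumer_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	/* genpd powers the blk-ctrl domain, which pulls in the GPC domains */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... hardware in the blk-ctrl domain is now accessible ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}
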
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
index 2772ef5e3934..f7a35b3656bb 100644
--- a/drivers/soc/mediatek/mt8167-mmsys.h
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -18,7 +18,7 @@ static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
}, {
- DDP_COMPONENT_DITHER, DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index 0c021f4b76d2..ff6be1703469 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -41,7 +41,7 @@ static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
MT8183_OVL1_2L_MOUT_EN_RDMA1
}, {
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
MT8183_DITHER0_MOUT_IN_DSI0
}, {
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index c72ccf86ea28..eb1ad9c37a9c 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -76,12 +76,12 @@ static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
MT8186_RDMA0_SOUT_TO_COLOR0
},
{
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
MT8186_DITHER0_MOUT_TO_DSI0,
},
{
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
MT8186_DSI0_FROM_DITHER0
},
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index 6aae0b12b6ff..a016d80b4bc1 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -40,7 +40,7 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
MT8192_OVL2_2L_MOUT_EN_RDMA4
}, {
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
MT8192_DITHER0_MOUT_IN_DSI0
}, {
@@ -52,7 +52,7 @@ static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
MT8192_AAL0_SEL_IN_CCORR0
}, {
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
MT8192_DSI0_SEL_IN_DITHER0
}, {
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
new file mode 100644
index 000000000000..abfe94a30248
--- /dev/null
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8195_MMSYS_H
+#define __SOC_MEDIATEK_MT8195_MMSYS_H
+
+#define MT8195_VDO0_OVL_MOUT_EN 0xf14
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6)
+
+#define MT8195_VDO0_SEL_IN 0xf34
+#define MT8195_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1 (1 << 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0 (2 << 0)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK GENMASK(4, 4)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0 (0 << 4)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE (1 << 4)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK GENMASK(5, 5)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1 (0 << 5)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE (1 << 5)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK GENMASK(8, 8)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE (0 << 8)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT (1 << 8)
+#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK GENMASK(9, 9)
+#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT (0 << 9)
+#define MT8195_SEL_IN_DP_INTF0_FROM_MASK GENMASK(13, 12)
+#define MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT	(0 << 12)
+#define MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 12)
+#define MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0 (2 << 12)
+#define MT8195_SEL_IN_DSI0_FROM_MASK GENMASK(16, 16)
+#define MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 16)
+#define MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 16)
+#define MT8195_SEL_IN_DSI1_FROM_MASK GENMASK(17, 17)
+#define MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT (0 << 17)
+#define MT8195_SEL_IN_DSI1_FROM_VPP_MERGE (1 << 17)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_MASK GENMASK(20, 20)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1 (0 << 20)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE (1 << 20)
+#define MT8195_SEL_IN_DSC_WRAP1_FROM_MASK GENMASK(21, 21)
+#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN (0 << 21)
+#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 (1 << 21)
+#define MT8195_SEL_IN_DISP_WDMA0_FROM_MASK GENMASK(22, 22)
+#define MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0 (0 << 22)
+
+#define MT8195_VDO0_SEL_OUT 0xf38
+#define MT8195_SOUT_DISP_DITHER0_TO_MASK BIT(0)
+#define MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0)
+#define MT8195_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0)
+#define MT8195_SOUT_DISP_DITHER1_TO_MASK GENMASK(2, 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN (0 << 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE (1 << 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT (2 << 1)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK GENMASK(4, 4)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE (0 << 4)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0 (1 << 4)
+#define MT8195_SOUT_VPP_MERGE_TO_MASK GENMASK(10, 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSI1 (0 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(13, 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0 (0 << 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 (1 << 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE (2 << 12)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK GENMASK(17, 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1 (0 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0 (1 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 (2 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE (3 << 16)
+
+static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
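
Each routing entry above is a (from, to, register, mask, value) tuple that mtk_mmsys_ddp_connect() applies when linking two DDP components. A sketch of how one entry is expected to be applied, assuming the field names of struct mtk_mmsys_routes and a plain read-modify-write (the in-tree helper may differ in detail):

static void example_apply_route(void __iomem *regs,
				const struct mtk_mmsys_routes *route)
{
	u32 val = readl(regs + route->addr);

	val &= ~route->mask;	/* clear the selector field */
	val |= route->val;	/* pick the requested source/sink */
	writel(val, regs + route->addr);
}
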
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 690e3fe2dee0..24129a6c25f8 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -41,12 +41,12 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
},
{
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
},
{
- DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
},
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 3c8e4212d941..c1837a468267 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -425,34 +425,11 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
-static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
-{
- struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
- struct cmdq_task_cb *cb = &pkt->cb;
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
- pkt->cmd_buf_size, DMA_TO_DEVICE);
- if (cb->cb) {
- data.data = cb->data;
- cb->cb(data);
- }
-}
-
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
- void *data)
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
{
int err;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- pkt->cb.cb = cb;
- pkt->cb.data = data;
- pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
- pkt->async_cb.data = pkt;
-
- dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
- pkt->cmd_buf_size, DMA_TO_DEVICE);
-
err = mbox_send_message(client->chan, pkt);
if (err < 0)
return err;
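
With the per-packet callback and the manual DMA syncs removed, cmdq_pkt_flush_async() simply queues the packet on the mailbox channel; completion is reported through the mailbox client callback that the caller already registers. A hedged caller-side sketch of the new signature (the helper name is made up):

static int example_flush(struct cmdq_pkt *pkt)
{
	int err;

	err = cmdq_pkt_finalize(pkt);	/* append EOC/jump as before */
	if (err < 0)
		return err;

	/* no callback or data argument any more */
	return cmdq_pkt_flush_async(pkt);
}
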
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 4fc4c2c9ea20..06d8e83a2cb5 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -17,6 +17,7 @@
#include "mt8183-mmsys.h"
#include "mt8186-mmsys.h"
#include "mt8192-mmsys.h"
+#include "mt8195-mmsys.h"
#include "mt8365-mmsys.h"
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
@@ -25,26 +26,61 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.num_routes = ARRAY_SIZE(mmsys_default_routing_table),
};
+static const struct mtk_mmsys_match_data mt2701_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt2701_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.clk_driver = "clk-mt2712-mm",
.routes = mmsys_default_routing_table,
.num_routes = ARRAY_SIZE(mmsys_default_routing_table),
};
+static const struct mtk_mmsys_match_data mt2712_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt2712_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
.clk_driver = "clk-mt6779-mm",
};
+static const struct mtk_mmsys_match_data mt6779_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt6779_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
.clk_driver = "clk-mt6797-mm",
};
+static const struct mtk_mmsys_match_data mt6797_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt6797_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
.clk_driver = "clk-mt8167-mm",
.routes = mt8167_mmsys_routing_table,
.num_routes = ARRAY_SIZE(mt8167_mmsys_routing_table),
};
+static const struct mtk_mmsys_match_data mt8167_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8167_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.clk_driver = "clk-mt8173-mm",
.routes = mmsys_default_routing_table,
@@ -52,6 +88,13 @@ static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
.sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
};
+static const struct mtk_mmsys_match_data mt8173_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8173_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.clk_driver = "clk-mt8183-mm",
.routes = mmsys_mt8183_routing_table,
@@ -59,6 +102,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
};
+static const struct mtk_mmsys_match_data mt8183_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8183_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.clk_driver = "clk-mt8186-mm",
.routes = mmsys_mt8186_routing_table,
@@ -66,10 +116,45 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
.sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
};
+static const struct mtk_mmsys_match_data mt8186_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8186_mmsys_driver_data,
+ },
+};
+
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
.clk_driver = "clk-mt8192-mm",
.routes = mmsys_mt8192_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table),
+ .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
+};
+
+static const struct mtk_mmsys_match_data mt8192_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8192_mmsys_driver_data,
+ },
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
+ .io_start = 0x1c01a000,
+ .clk_driver = "clk-mt8195-vdo0",
+ .routes = mmsys_mt8195_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8195_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
+ .io_start = 0x1c100000,
+ .clk_driver = "clk-mt8195-vdo1",
+};
+
+static const struct mtk_mmsys_match_data mt8195_mmsys_match_data = {
+ .num_drv_data = 2,
+ .drv_data = {
+ &mt8195_vdosys0_driver_data,
+ &mt8195_vdosys1_driver_data,
+ },
};
static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
@@ -78,13 +163,33 @@ static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
.num_routes = ARRAY_SIZE(mt8365_mmsys_routing_table),
};
+static const struct mtk_mmsys_match_data mt8365_mmsys_match_data = {
+ .num_drv_data = 1,
+ .drv_data = {
+ &mt8365_mmsys_driver_data,
+ },
+};
+
struct mtk_mmsys {
void __iomem *regs;
const struct mtk_mmsys_driver_data *data;
spinlock_t lock; /* protects mmsys_sw_rst_b reg */
struct reset_controller_dev rcdev;
+ phys_addr_t io_start;
};
+static int mtk_mmsys_find_match_drvdata(struct mtk_mmsys *mmsys,
+ const struct mtk_mmsys_match_data *match)
+{
+ int i;
+
+ for (i = 0; i < match->num_drv_data; i++)
+ if (mmsys->io_start == match->drv_data[i]->io_start)
+ return i;
+
+ return -EINVAL;
+}
+
void mtk_mmsys_ddp_connect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
@@ -179,7 +284,9 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct platform_device *clks;
struct platform_device *drm;
+ const struct mtk_mmsys_match_data *match_data;
struct mtk_mmsys *mmsys;
+ struct resource *res;
int ret;
mmsys = devm_kzalloc(dev, sizeof(*mmsys), GFP_KERNEL);
@@ -205,7 +312,27 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
return ret;
}
- mmsys->data = of_device_get_match_data(&pdev->dev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Couldn't get mmsys resource\n");
+ return -EINVAL;
+ }
+ mmsys->io_start = res->start;
+
+ match_data = of_device_get_match_data(dev);
+ if (match_data->num_drv_data > 1) {
+ /* This SoC has multiple mmsys channels */
+ ret = mtk_mmsys_find_match_drvdata(mmsys, match_data);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't get match driver data\n");
+ return ret;
+ }
+ mmsys->data = match_data->drv_data[ret];
+ } else {
+ dev_dbg(dev, "Using single mmsys channel\n");
+ mmsys->data = match_data->drv_data[0];
+ }
+
platform_set_drvdata(pdev, mmsys);
clks = platform_device_register_data(&pdev->dev, mmsys->data->clk_driver,
@@ -226,43 +353,47 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
static const struct of_device_id of_match_mtk_mmsys[] = {
{
.compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data,
+ .data = &mt2701_mmsys_match_data,
},
{
.compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data,
+ .data = &mt2712_mmsys_match_data,
},
{
.compatible = "mediatek,mt6779-mmsys",
- .data = &mt6779_mmsys_driver_data,
+ .data = &mt6779_mmsys_match_data,
},
{
.compatible = "mediatek,mt6797-mmsys",
- .data = &mt6797_mmsys_driver_data,
+ .data = &mt6797_mmsys_match_data,
},
{
.compatible = "mediatek,mt8167-mmsys",
- .data = &mt8167_mmsys_driver_data,
+ .data = &mt8167_mmsys_match_data,
},
{
.compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data,
+ .data = &mt8173_mmsys_match_data,
},
{
.compatible = "mediatek,mt8183-mmsys",
- .data = &mt8183_mmsys_driver_data,
+ .data = &mt8183_mmsys_match_data,
},
{
.compatible = "mediatek,mt8186-mmsys",
- .data = &mt8186_mmsys_driver_data,
+ .data = &mt8186_mmsys_match_data,
},
{
.compatible = "mediatek,mt8192-mmsys",
- .data = &mt8192_mmsys_driver_data,
+ .data = &mt8192_mmsys_match_data,
+ },
+ {
+ .compatible = "mediatek,mt8195-mmsys",
+ .data = &mt8195_mmsys_match_data,
},
{
.compatible = "mediatek,mt8365-mmsys",
- .data = &mt8365_mmsys_driver_data,
+ .data = &mt8365_mmsys_match_data,
},
{ }
};
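The of_device_id table above now points at match containers rather than a single driver_data, so one compatible (here "mediatek,mt8195-mmsys") can cover several mmsys instances, and the probe path picks the right entry by comparing the device's MMIO base against io_start. A minimal user-space sketch of that lookup, using the two MT8195 base addresses from this patch but simplified stand-in structures rather than the kernel definitions:

#include <stdio.h>

struct drv_data {
	unsigned long io_start;
	const char *clk_driver;
};

static const struct drv_data vdosys0 = { 0x1c01a000, "clk-mt8195-vdo0" };
static const struct drv_data vdosys1 = { 0x1c100000, "clk-mt8195-vdo1" };
static const struct drv_data *match[] = { &vdosys0, &vdosys1 };

/* Mirrors mtk_mmsys_find_match_drvdata(): return the index whose io_start matches. */
static int find_drvdata(unsigned long io_start)
{
	for (unsigned int i = 0; i < 2; i++)
		if (match[i]->io_start == io_start)
			return i;
	return -1;	/* the kernel version returns -EINVAL */
}

int main(void)
{
	/* A device whose register resource starts at 0x1c100000 selects vdosys1. */
	printf("%s\n", match[find_drvdata(0x1c100000)]->clk_driver);
	return 0;
}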
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 77f37f8c715b..f01ba206481d 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -87,12 +87,18 @@ struct mtk_mmsys_routes {
};
struct mtk_mmsys_driver_data {
+ const resource_size_t io_start;
const char *clk_driver;
const struct mtk_mmsys_routes *routes;
const unsigned int num_routes;
const u16 sw0_rst_offset;
};
+struct mtk_mmsys_match_data {
+ unsigned short num_drv_data;
+ const struct mtk_mmsys_driver_data *drv_data[];
+};
+
/*
* Routes in mt8173, mt2701, mt2712 are different. That means
* in the same register address, it controls different input/output
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index aaf8fc1abb43..981d56967e7a 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -96,6 +96,20 @@
#define MT8173_MUTEX_MOD_DISP_PWM1 24
#define MT8173_MUTEX_MOD_DISP_OD 25
+#define MT8195_MUTEX_MOD_DISP_OVL0 0
+#define MT8195_MUTEX_MOD_DISP_WDMA0 1
+#define MT8195_MUTEX_MOD_DISP_RDMA0 2
+#define MT8195_MUTEX_MOD_DISP_COLOR0 3
+#define MT8195_MUTEX_MOD_DISP_CCORR0 4
+#define MT8195_MUTEX_MOD_DISP_AAL0 5
+#define MT8195_MUTEX_MOD_DISP_GAMMA0 6
+#define MT8195_MUTEX_MOD_DISP_DITHER0 7
+#define MT8195_MUTEX_MOD_DISP_DSI0 8
+#define MT8195_MUTEX_MOD_DISP_DSC_WRAP0_CORE0 9
+#define MT8195_MUTEX_MOD_DISP_VPP_MERGE 20
+#define MT8195_MUTEX_MOD_DISP_DP_INTF0 21
+#define MT8195_MUTEX_MOD_DISP_PWM0 27
+
#define MT2712_MUTEX_MOD_DISP_PWM2 10
#define MT2712_MUTEX_MOD_DISP_OVL0 11
#define MT2712_MUTEX_MOD_DISP_OVL1 12
@@ -132,9 +146,21 @@
#define MT8167_MUTEX_SOF_DPI1 3
#define MT8183_MUTEX_SOF_DSI0 1
#define MT8183_MUTEX_SOF_DPI0 2
+#define MT8195_MUTEX_SOF_DSI0 1
+#define MT8195_MUTEX_SOF_DSI1 2
+#define MT8195_MUTEX_SOF_DP_INTF0 3
+#define MT8195_MUTEX_SOF_DP_INTF1 4
+#define MT8195_MUTEX_SOF_DPI0 6 /* for HDMI_TX */
+#define MT8195_MUTEX_SOF_DPI1 5 /* for digital video out */
#define MT8183_MUTEX_EOF_DSI0 (MT8183_MUTEX_SOF_DSI0 << 6)
#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
+#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
+#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
+#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
+#define MT8195_MUTEX_EOF_DP_INTF1 (MT8195_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8195_MUTEX_EOF_DPI0 (MT8195_MUTEX_SOF_DPI0 << 7)
+#define MT8195_MUTEX_EOF_DPI1 (MT8195_MUTEX_SOF_DPI1 << 7)
struct mtk_mutex {
int id;
@@ -149,6 +175,9 @@ enum mtk_mutex_sof_id {
MUTEX_SOF_DPI1,
MUTEX_SOF_DSI2,
MUTEX_SOF_DSI3,
+ MUTEX_SOF_DP_INTF0,
+ MUTEX_SOF_DP_INTF1,
+ DDP_MUTEX_SOF_MAX,
};
struct mtk_mutex_data {
@@ -200,7 +229,7 @@ static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL,
[DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR,
[DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR,
- [DDP_COMPONENT_DITHER] = MT8167_MUTEX_MOD_DISP_DITHER,
+ [DDP_COMPONENT_DITHER0] = MT8167_MUTEX_MOD_DISP_DITHER,
[DDP_COMPONENT_GAMMA] = MT8167_MUTEX_MOD_DISP_GAMMA,
[DDP_COMPONENT_OVL0] = MT8167_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL1] = MT8167_MUTEX_MOD_DISP_OVL1,
@@ -233,7 +262,7 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_DITHER] = MT8183_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_DITHER0] = MT8183_MUTEX_MOD_DISP_DITHER0,
[DDP_COMPONENT_GAMMA] = MT8183_MUTEX_MOD_DISP_GAMMA0,
[DDP_COMPONENT_OVL0] = MT8183_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL_2L0] = MT8183_MUTEX_MOD_DISP_OVL0_2L,
@@ -247,7 +276,7 @@ static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_DITHER] = MT8186_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_DITHER0] = MT8186_MUTEX_MOD_DISP_DITHER0,
[DDP_COMPONENT_GAMMA] = MT8186_MUTEX_MOD_DISP_GAMMA0,
[DDP_COMPONENT_OVL0] = MT8186_MUTEX_MOD_DISP_OVL0,
[DDP_COMPONENT_OVL_2L0] = MT8186_MUTEX_MOD_DISP_OVL0_2L,
@@ -260,7 +289,7 @@ static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
[DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_DITHER] = MT8192_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_DITHER0] = MT8192_MUTEX_MOD_DISP_DITHER0,
[DDP_COMPONENT_GAMMA] = MT8192_MUTEX_MOD_DISP_GAMMA0,
[DDP_COMPONENT_POSTMASK0] = MT8192_MUTEX_MOD_DISP_POSTMASK0,
[DDP_COMPONENT_OVL0] = MT8192_MUTEX_MOD_DISP_OVL0,
@@ -270,7 +299,23 @@ static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
};
-static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_OVL0] = MT8195_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_WDMA0] = MT8195_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_RDMA0] = MT8195_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_COLOR0] = MT8195_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_CCORR] = MT8195_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_AAL0] = MT8195_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_GAMMA] = MT8195_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_DITHER0] = MT8195_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_MERGE0] = MT8195_MUTEX_MOD_DISP_VPP_MERGE,
+ [DDP_COMPONENT_DSC0] = MT8195_MUTEX_MOD_DISP_DSC_WRAP0_CORE0,
+ [DDP_COMPONENT_DSI0] = MT8195_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_PWM0] = MT8195_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0,
+};
+
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
@@ -280,7 +325,7 @@ static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
-static const unsigned int mt8167_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
[MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
@@ -288,7 +333,7 @@ static const unsigned int mt8167_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
};
/* Add EOF setting so overlay hardware can receive frame done irq */
-static const unsigned int mt8183_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
@@ -300,6 +345,26 @@ static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0,
};
+/*
+ * To support refresh mode (video mode), DISP_REG_MUTEX_SOF should
+ * select the EOF source and configure the EOF plus timing from the
+ * module that provides the timing signal.
+ * This way MUTEX can not only send a STREAM_DONE event to GCE
+ * but also detect an error at end of frame (EAEOF) when the EOF
+ * signal arrives.
+ */
+static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DSI1] = MT8195_MUTEX_SOF_DSI1 | MT8195_MUTEX_EOF_DSI1,
+ [MUTEX_SOF_DPI0] = MT8195_MUTEX_SOF_DPI0 | MT8195_MUTEX_EOF_DPI0,
+ [MUTEX_SOF_DPI1] = MT8195_MUTEX_SOF_DPI1 | MT8195_MUTEX_EOF_DPI1,
+ [MUTEX_SOF_DP_INTF0] =
+ MT8195_MUTEX_SOF_DP_INTF0 | MT8195_MUTEX_EOF_DP_INTF0,
+ [MUTEX_SOF_DP_INTF1] =
+ MT8195_MUTEX_SOF_DP_INTF1 | MT8195_MUTEX_EOF_DP_INTF1,
+};
+
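The MT8195 SOF/EOF macros pack two fields into each mt8195_mutex_sof[] entry: the SOF source index in the low bits and the same index as the EOF source shifted up by 7 (MT8183 shifts by 6, which is why the two SoCs need separate EOF macros even where the SOF indices coincide). A standalone arithmetic sketch, assuming only the values shown above:

#include <stdio.h>

#define SOF_DP_INTF0	3	/* MT8195_MUTEX_SOF_DP_INTF0 */
#define EOF_SHIFT	7	/* MT8195; MT8183 uses 6 */

int main(void)
{
	unsigned int entry = SOF_DP_INTF0 | (SOF_DP_INTF0 << EOF_SHIFT);

	/* Prints 0x183: SOF index in the low bits, EOF source starting at bit 7. */
	printf("0x%x\n", entry);
	return 0;
}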
static const struct mtk_mutex_data mt2701_mutex_driver_data = {
.mutex_mod = mt2701_mutex_mod,
.mutex_sof = mt2712_mutex_sof,
@@ -351,6 +416,13 @@ static const struct mtk_mutex_data mt8192_mutex_driver_data = {
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8195_mutex_driver_data = {
+ .mutex_mod = mt8195_mutex_mod,
+ .mutex_sof = mt8195_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -423,6 +495,9 @@ void mtk_mutex_add_comp(struct mtk_mutex *mutex,
case DDP_COMPONENT_DPI1:
sof_id = MUTEX_SOF_DPI1;
break;
+ case DDP_COMPONENT_DP_INTF0:
+ sof_id = MUTEX_SOF_DP_INTF0;
+ break;
default:
if (mtx->data->mutex_mod[id] < 32) {
offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
@@ -462,6 +537,7 @@ void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
case DDP_COMPONENT_DSI3:
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
+ case DDP_COMPONENT_DP_INTF0:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
mtx->regs +
DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg,
@@ -587,6 +663,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8186_mutex_driver_data},
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = &mt8192_mutex_driver_data},
+ { .compatible = "mediatek,mt8195-disp-mutex",
+ .data = &mt8195_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index eecafeded56f..4b143cf7b4ce 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -130,6 +130,50 @@ static const struct llcc_slice_config sc7280_data[] = {
{ LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
};
+static const struct llcc_slice_config sc8180x_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sc8280xp_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 32, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 36, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
@@ -276,6 +320,20 @@ static const struct qcom_llcc_config sc7280_cfg = {
.reg_offset = llcc_v1_2_reg_offset,
};
+static const struct qcom_llcc_config sc8180x_cfg = {
+ .sct_data = sc8180x_data,
+ .size = ARRAY_SIZE(sc8180x_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8280xp_cfg = {
+ .sct_data = sc8280xp_data,
+ .size = ARRAY_SIZE(sc8280xp_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_2_reg_offset,
+};
+
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
@@ -741,6 +799,8 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
+ { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg },
+ { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
{ .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
@@ -749,6 +809,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
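The new sc8180x/sc8280xp tables only describe the static slice partitioning; client drivers still reserve and enable a slice through the exported llcc helpers. A hedged usage sketch (a generic pattern, not code from this patch; the GPU use-case ID is taken from the tables above):

#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_claim_gpu_slice(void)
{
	struct llcc_slice_desc *slice;
	int ret;

	slice = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR(slice))
		return PTR_ERR(slice);

	ret = llcc_slice_activate(slice);
	if (ret) {
		llcc_slice_putd(slice);
		return ret;
	}

	/* ... tag transactions with llcc_get_slice_id(slice) while active ... */

	llcc_slice_deactivate(slice);
	llcc_slice_putd(slice);
	return 0;
}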
static struct platform_driver qcom_llcc_driver = {
.driver = {
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index fc580a3c4336..0034af927b48 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -304,24 +304,23 @@ static void pdr_indication_cb(struct qmi_handle *qmi,
notifier_hdl);
const struct servreg_state_updated_ind *ind_msg = data;
struct pdr_list_node *ind;
- struct pdr_service *pds;
- bool found = false;
+ struct pdr_service *pds = NULL, *iter;
if (!ind_msg || !ind_msg->service_path[0] ||
strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
return;
mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (strcmp(pds->service_path, ind_msg->service_path))
+ list_for_each_entry(iter, &pdr->lookups, node) {
+ if (strcmp(iter->service_path, ind_msg->service_path))
continue;
- found = true;
+ pds = iter;
break;
}
mutex_unlock(&pdr->list_lock);
- if (!found)
+ if (!pds)
return;
pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index ab9ae8cdfa54..a30422214943 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -28,7 +28,7 @@ struct servreg_location_entry {
u32 instance;
};
-struct qmi_elem_info servreg_location_entry_ei[] = {
+static struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -74,7 +74,7 @@ struct servreg_get_domain_list_req {
u32 domain_offset;
};
-struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+static struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -116,7 +116,7 @@ struct servreg_get_domain_list_resp {
struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};
-struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+static struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -199,7 +199,7 @@ struct servreg_register_listener_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-struct qmi_elem_info servreg_register_listener_req_ei[] = {
+static struct qmi_elem_info servreg_register_listener_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -227,7 +227,7 @@ struct servreg_register_listener_resp {
enum servreg_service_state curr_state;
};
-struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+static struct qmi_elem_info servreg_register_listener_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -263,7 +263,7 @@ struct servreg_restart_pd_req {
char service_path[SERVREG_NAME_LENGTH + 1];
};
-struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+static struct qmi_elem_info servreg_restart_pd_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -280,7 +280,7 @@ struct servreg_restart_pd_resp {
struct qmi_response_type_v01 resp;
};
-struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+static struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -300,7 +300,7 @@ struct servreg_state_updated_ind {
u16 transaction_id;
};
-struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+static struct qmi_elem_info servreg_state_updated_ind_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
@@ -336,7 +336,7 @@ struct servreg_set_ack_req {
u16 transaction_id;
};
-struct qmi_elem_info servreg_set_ack_req_ei[] = {
+static struct qmi_elem_info servreg_set_ack_req_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -362,7 +362,7 @@ struct servreg_set_ack_resp {
struct qmi_response_type_v01 resp;
};
-struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+static struct qmi_elem_info servreg_set_ack_resp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 58f1dc9b9cb7..05fff8691ee3 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -180,6 +180,36 @@ static struct rpmhpd mxc_ao = {
.res_name = "mxc.lvl",
};
+static struct rpmhpd nsp = {
+ .pd = { .name = "nsp", },
+ .res_name = "nsp.lvl",
+};
+
+static struct rpmhpd qphy = {
+ .pd = { .name = "qphy", },
+ .res_name = "qphy.lvl",
+};
+
+/* SA8540P RPMH powerdomains */
+static struct rpmhpd *sa8540p_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+};
+
+static const struct rpmhpd_desc sa8540p_desc = {
+ .rpmhpds = sa8540p_rpmhpds,
+ .num_pds = ARRAY_SIZE(sa8540p_rpmhpds),
+};
+
/* SDM845 RPMH powerdomains */
static struct rpmhpd *sdm845_rpmhpds[] = {
[SDM845_CX] = &cx_w_mx_parent,
@@ -210,6 +240,21 @@ static const struct rpmhpd_desc sdx55_desc = {
.num_pds = ARRAY_SIZE(sdx55_rpmhpds),
};
+/* SDX65 RPMH powerdomains */
+static struct rpmhpd *sdx65_rpmhpds[] = {
+ [SDX65_CX] = &cx_w_mx_parent,
+ [SDX65_CX_AO] = &cx_ao_w_mx_parent,
+ [SDX65_MSS] = &mss,
+ [SDX65_MX] = &mx,
+ [SDX65_MX_AO] = &mx_ao,
+ [SDX65_MXC] = &mxc,
+};
+
+static const struct rpmhpd_desc sdx65_desc = {
+ .rpmhpds = sdx65_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx65_rpmhpds),
+};
+
/* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = {
[SM6350_CX] = &cx_w_mx_parent,
@@ -363,12 +408,36 @@ static const struct rpmhpd_desc sc8180x_desc = {
.num_pds = ARRAY_SIZE(sc8180x_rpmhpds),
};
+/* SC8280xp RPMH powerdomains */
+static struct rpmhpd *sc8280xp_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+ [SC8280XP_QPHY] = &qphy,
+};
+
+static const struct rpmhpd_desc sc8280xp_desc = {
+ .rpmhpds = sc8280xp_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc8280xp_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
+ { .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
+ { .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc},
{ .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
@@ -597,10 +666,8 @@ static int rpmhpd_probe(struct platform_device *pdev)
data->num_domains = num_pds;
for (i = 0; i < num_pds; i++) {
- if (!rpmhpds[i]) {
- dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ if (!rpmhpds[i])
continue;
- }
rpmhpds[i]->dev = dev;
rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index e2057d8f1eff..3e95835653ea 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -195,6 +195,20 @@ struct smem_partition_header {
__le32 reserved[3];
};
+/**
+ * struct smem_partition - describes smem partition
+ * @virt_base: starting virtual address of partition
+ * @phys_base: starting physical address of partition
+ * @cacheline: alignment for "cached" entries
+ * @size: size of partition
+ */
+struct smem_partition {
+ void __iomem *virt_base;
+ phys_addr_t phys_base;
+ size_t cacheline;
+ size_t size;
+};
+
static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
/**
@@ -250,11 +264,9 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
- * @global_partition: pointer to global partition when in use
- * @global_cacheline: cacheline size for global partition
- * @partitions: list of pointers to partitions affecting the current
- * processor/host
- * @cacheline: list of cacheline sizes for each host
+ * @ptable: virtual base of partition table
+ * @global_partition: describes the global partition when in use
+ * @partitions: list of partitions of current processor/host
* @item_count: max accepted item number
* @socinfo: platform device pointer
* @num_regions: number of @regions
@@ -265,12 +277,11 @@ struct qcom_smem {
struct hwspinlock *hwlock;
- struct smem_partition_header *global_partition;
- size_t global_cacheline;
- struct smem_partition_header *partitions[SMEM_HOST_COUNT];
- size_t cacheline[SMEM_HOST_COUNT];
u32 item_count;
struct platform_device *socinfo;
+ struct smem_ptable *ptable;
+ struct smem_partition global_partition;
+ struct smem_partition partitions[SMEM_HOST_COUNT];
unsigned num_regions;
struct smem_region regions[];
@@ -348,18 +359,26 @@ static struct qcom_smem *__smem;
#define HWSPINLOCK_TIMEOUT 1000
static int qcom_smem_alloc_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
+ struct smem_partition *part,
unsigned item,
size_t size)
{
struct smem_private_entry *hdr, *end;
+ struct smem_partition_header *phdr;
size_t alloc_size;
void *cached;
+ void *p_end;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
hdr = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
cached = phdr_to_last_cached_entry(phdr);
+ if (WARN_ON((void *)end > p_end || cached > p_end))
+ return -EINVAL;
+
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY)
goto bad_canary;
@@ -369,6 +388,9 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
hdr = uncached_entry_next(hdr);
}
+ if (WARN_ON((void *)hdr > p_end))
+ return -EINVAL;
+
/* Check that we don't grow into the cached region */
alloc_size = sizeof(*hdr) + ALIGN(size, 8);
if ((void *)hdr + alloc_size > cached) {
@@ -442,7 +464,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
int ret;
@@ -464,12 +486,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
if (ret)
return ret;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
} else {
ret = qcom_smem_alloc_global(__smem, item, size);
}
@@ -487,6 +509,8 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
struct smem_header *header;
struct smem_region *region;
struct smem_global_entry *entry;
+ u64 entry_offset;
+ u32 e_size;
u32 aux_base;
unsigned i;
@@ -501,9 +525,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
region = &smem->regions[i];
if ((u32)region->aux_base == aux_base || !aux_base) {
+ e_size = le32_to_cpu(entry->size);
+ entry_offset = le32_to_cpu(entry->offset);
+
+ if (WARN_ON(e_size + entry_offset > region->size))
+ return ERR_PTR(-EINVAL);
+
if (size != NULL)
- *size = le32_to_cpu(entry->size);
- return region->virt_base + le32_to_cpu(entry->offset);
+ *size = e_size;
+
+ return region->virt_base + entry_offset;
}
}
@@ -511,12 +542,18 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
- struct smem_partition_header *phdr,
- size_t cacheline,
+ struct smem_partition *part,
unsigned item,
size_t *size)
{
struct smem_private_entry *e, *end;
+ struct smem_partition_header *phdr;
+ void *item_ptr, *p_end;
+ u32 padding_data;
+ u32 e_size;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
e = phdr_to_first_uncached_entry(phdr);
end = phdr_to_last_uncached_entry(phdr);
@@ -526,36 +563,65 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
goto invalid_canary;
if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
- return uncached_entry_to_item(e);
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = uncached_entry_to_item(e);
+ if (WARN_ON(item_ptr > p_end))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
}
e = uncached_entry_next(e);
}
+ if (WARN_ON((void *)e > p_end))
+ return ERR_PTR(-EINVAL);
+
/* Item was not found in the uncached list, search the cached list */
- e = phdr_to_first_cached_entry(phdr, cacheline);
+ e = phdr_to_first_cached_entry(phdr, part->cacheline);
end = phdr_to_last_cached_entry(phdr);
+ if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+ return ERR_PTR(-EINVAL);
+
while (e > end) {
if (e->canary != SMEM_PRIVATE_CANARY)
goto invalid_canary;
if (le16_to_cpu(e->item) == item) {
- if (size != NULL)
- *size = le32_to_cpu(e->size) -
- le16_to_cpu(e->padding_data);
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
- return cached_entry_to_item(e);
+ item_ptr = cached_entry_to_item(e);
+ if (WARN_ON(item_ptr < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
}
- e = cached_entry_next(e, cacheline);
+ e = cached_entry_next(e, part->cacheline);
}
+ if (WARN_ON((void *)e < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
return ERR_PTR(-ENOENT);
invalid_canary:
@@ -576,9 +642,8 @@ invalid_canary:
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
- struct smem_partition_header *phdr;
+ struct smem_partition *part;
unsigned long flags;
- size_t cacheln;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -594,14 +659,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (ret)
return ERR_PTR(ret);
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
- cacheln = __smem->cacheline[host];
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
- cacheln = __smem->global_cacheline;
- ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ptr = qcom_smem_get_private(__smem, part, item, size);
} else {
ptr = qcom_smem_get_global(__smem, item, size);
}
@@ -622,6 +685,7 @@ EXPORT_SYMBOL(qcom_smem_get);
*/
int qcom_smem_get_free_space(unsigned host)
{
+ struct smem_partition *part;
struct smem_partition_header *phdr;
struct smem_header *header;
unsigned ret;
@@ -629,23 +693,39 @@ int qcom_smem_get_free_space(unsigned host)
if (!__smem)
return -EPROBE_DEFER;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
- phdr = __smem->partitions[host];
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
- } else if (__smem->global_partition) {
- phdr = __smem->global_partition;
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ phdr = part->virt_base;
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
+
+ if (ret > __smem->regions[0].size)
+ return -EINVAL;
}
return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
+static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+{
+ return base && (addr >= base && addr < base + size);
+}
+
/**
* qcom_smem_virt_to_phys() - return the physical address associated
* with an smem item pointer (previously returned by qcom_smem_get()
@@ -655,17 +735,36 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
*/
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
- unsigned i;
+ struct smem_partition *part;
+ struct smem_region *area;
+ u64 offset;
+ u32 i;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ part = &__smem->partitions[i];
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+ }
+
+ part = &__smem->global_partition;
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
for (i = 0; i < __smem->num_regions; i++) {
- struct smem_region *region = &__smem->regions[i];
+ area = &__smem->regions[i];
- if (p < region->virt_base)
- continue;
- if (p < region->virt_base + region->size) {
- u64 offset = p - region->virt_base;
+ if (addr_in_range(area->virt_base, area->size, p)) {
+ offset = p - area->virt_base;
- return region->aux_base + offset;
+ return (phys_addr_t)area->aux_base + offset;
}
}
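With each partition now carrying both virt_base and phys_base, qcom_smem_virt_to_phys() reduces to a range check plus offset arithmetic, and the same math is reused for the fallback region walk. A small user-space sketch of that arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char region[4096];			/* stand-in for a mapped partition */
	char *virt_base = region;
	uint64_t phys_base = 0x86000000;	/* hypothetical aux_base/phys_base */
	char *item = region + 0x120;		/* pointer handed back by a lookup */

	if (item >= virt_base && item < virt_base + sizeof(region)) {
		uint64_t offset = (uint64_t)(item - virt_base);
		printf("phys = 0x%llx\n", (unsigned long long)(phys_base + offset));
	}
	return 0;
}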
@@ -689,7 +788,7 @@ static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
struct smem_ptable *ptable;
u32 version;
- ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ ptable = smem->ptable;
if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
return ERR_PTR(-ENOENT);
@@ -728,9 +827,14 @@ qcom_smem_partition_header(struct qcom_smem *smem,
struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
struct smem_partition_header *header;
+ u32 phys_addr;
u32 size;
- header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+ header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
+
+ if (!header)
+ return NULL;
if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
@@ -772,7 +876,7 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
bool found = false;
int i;
- if (smem->global_partition) {
+ if (smem->global_partition.virt_base) {
dev_err(smem->dev, "Already found the global partition\n");
return -EINVAL;
}
@@ -807,8 +911,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
if (!header)
return -EINVAL;
- smem->global_partition = header;
- smem->global_cacheline = le32_to_cpu(entry->cacheline);
+ smem->global_partition.virt_base = (void __iomem *)header;
+ smem->global_partition.phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->global_partition.size = le32_to_cpu(entry->size);
+ smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
return 0;
}
@@ -848,7 +955,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
return -EINVAL;
}
- if (smem->partitions[remote_host]) {
+ if (smem->partitions[remote_host].virt_base) {
dev_err(smem->dev, "duplicate host %hu\n", remote_host);
return -EINVAL;
}
@@ -857,13 +964,47 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
if (!header)
return -EINVAL;
- smem->partitions[remote_host] = header;
- smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
+ smem->partitions[remote_host].virt_base = (void __iomem *)header;
+ smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->partitions[remote_host].size = le32_to_cpu(entry->size);
+ smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
}
return 0;
}
+static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
+{
+ u32 ptable_start;
+
+ /* map starting 4K for smem header */
+ region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
+ ptable_start = region->aux_base + region->size - SZ_4K;
+ /* map last 4k for toc */
+ smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
+
+ if (!region->virt_base || !smem->ptable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
+{
+ u32 phys_addr;
+
+ phys_addr = smem->regions[0].aux_base;
+
+ smem->regions[0].size = size;
+ smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
+
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
struct smem_region *region)
{
@@ -894,10 +1035,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
struct smem_header *header;
struct reserved_mem *rmem;
struct qcom_smem *smem;
+ unsigned long flags;
size_t array_size;
int num_regions;
int hwlock_id;
u32 version;
+ u32 size;
int ret;
int i;
@@ -933,7 +1076,12 @@ static int qcom_smem_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < num_regions; i++) {
+
+ ret = qcom_smem_map_toc(smem, &smem->regions[0]);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < num_regions; i++) {
smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
smem->regions[i].aux_base,
smem->regions[i].size);
@@ -950,7 +1098,30 @@ static int qcom_smem_probe(struct platform_device *pdev)
return -EINVAL;
}
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
+ if (ret)
+ return ret;
+ size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
+ hwspin_unlock_irqrestore(smem->hwlock, &flags);
+
version = qcom_smem_get_sbl_version(smem);
+ /*
+ * The smem header mapping is required only in the heap version scheme, so
+ * unmap it here. It will be remapped in qcom_smem_map_global() when the
+ * whole smem region is mapped again.
+ */
+ devm_iounmap(smem->dev, smem->regions[0].virt_base);
switch (version >> 16) {
case SMEM_GLOBAL_PART_VERSION:
ret = qcom_smem_set_global_partition(smem);
@@ -959,6 +1130,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->item_count = qcom_smem_get_item_count(smem);
break;
case SMEM_GLOBAL_HEAP_VERSION:
+ qcom_smem_map_global(smem, size);
smem->item_count = SMEM_ITEM_COUNT;
break;
default:
@@ -971,17 +1143,6 @@ static int qcom_smem_probe(struct platform_device *pdev)
if (ret < 0 && ret != -ENOENT)
return ret;
- hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
- if (hwlock_id < 0) {
- if (hwlock_id != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
- return hwlock_id;
- }
-
- smem->hwlock = hwspin_lock_request_specific(hwlock_id);
- if (!smem->hwlock)
- return -ENXIO;
-
__smem = smem;
smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
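The rework above changes how partitions are mapped and bounds-checked, but the exported interface stays the same: callers allocate and look up items by host and item ID, and may translate the returned pointer to a physical address. A hedged usage sketch built only from the signatures visible in this file; the host and item numbers are hypothetical:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_HOST	1	/* hypothetical remote host ID */
#define EXAMPLE_ITEM	602	/* hypothetical SMEM item number */

static int example_smem_user(void)
{
	phys_addr_t phys;
	size_t size;
	void *ptr;
	int ret;

	/* Returns -EPROBE_DEFER until smem has probed; -EEXIST if already allocated. */
	ret = qcom_smem_alloc(EXAMPLE_HOST, EXAMPLE_ITEM, 64);
	if (ret && ret != -EEXIST)
		return ret;

	ptr = qcom_smem_get(EXAMPLE_HOST, EXAMPLE_ITEM, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	phys = qcom_smem_virt_to_phys(ptr);
	pr_info("item %d: %zu bytes at %pa\n", EXAMPLE_ITEM, size, &phys);
	return 0;
}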
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4a157240f419..59dbf4b61e6c 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -493,6 +493,7 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
}
smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(smp2p->ipc_regmap))
return PTR_ERR(smp2p->ipc_regmap);
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index ef15d014c03a..9df9bba242f3 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -374,6 +374,7 @@ static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
return 0;
host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(host->ipc_regmap))
return PTR_ERR(host->ipc_regmap);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 8b38d134720a..cee579a267a6 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -236,24 +236,24 @@ static const struct soc_id soc_id[] = {
{ 184, "APQ8074" },
{ 185, "MSM8274" },
{ 186, "MSM8674" },
- { 194, "MSM8974PRO" },
+ { 194, "MSM8974PRO-AC" },
{ 198, "MSM8126" },
{ 199, "APQ8026" },
{ 200, "MSM8926" },
{ 205, "MSM8326" },
{ 206, "MSM8916" },
{ 207, "MSM8994" },
- { 208, "APQ8074-AA" },
- { 209, "APQ8074-AB" },
- { 210, "APQ8074PRO" },
- { 211, "MSM8274-AA" },
- { 212, "MSM8274-AB" },
- { 213, "MSM8274PRO" },
- { 214, "MSM8674-AA" },
- { 215, "MSM8674-AB" },
- { 216, "MSM8674PRO" },
- { 217, "MSM8974-AA" },
- { 218, "MSM8974-AB" },
+ { 208, "APQ8074PRO-AA" },
+ { 209, "APQ8074PRO-AB" },
+ { 210, "APQ8074PRO-AC" },
+ { 211, "MSM8274PRO-AA" },
+ { 212, "MSM8274PRO-AB" },
+ { 213, "MSM8274PRO-AC" },
+ { 214, "MSM8674PRO-AA" },
+ { 215, "MSM8674PRO-AB" },
+ { 216, "MSM8674PRO-AC" },
+ { 217, "MSM8974PRO-AA" },
+ { 218, "MSM8974PRO-AB" },
{ 219, "APQ8028" },
{ 220, "MSM8128" },
{ 221, "MSM8228" },
@@ -330,6 +330,8 @@ static const struct soc_id soc_id[] = {
{ 459, "SM7225" },
{ 460, "SA8540P" },
{ 480, "SM8450" },
+ { 482, "SM8450" },
+ { 487, "SC7280" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index fdc99a05a7e0..c50a6ce1b99d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -47,6 +47,8 @@ config ARCH_RZG2L
config ARCH_RZN1
bool
+ select PM
+ select PM_GENERIC_DOMAINS
select ARM_AMBA
if ARM && ARCH_RENESAS
@@ -268,6 +270,13 @@ config ARCH_R8A779A0
help
This enables support for the Renesas R-Car V3U SoC.
+config ARCH_R8A779G0
+ bool "ARM64 Platform support for R-Car V4H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779G0
+ help
+ This enables support for the Renesas R-Car V4H SoC.
+
config ARCH_R8A774C0
bool "ARM64 Platform support for RZ/G2E"
select ARCH_RCAR_GEN3
@@ -296,6 +305,12 @@ config ARCH_R8A774B1
help
This enables support for the Renesas RZ/G2N SoC.
+config ARCH_R9A07G043
+ bool "ARM64 Platform support for RZ/G2UL"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/G2UL SoC variants.
+
config ARCH_R9A07G044
bool "ARM64 Platform support for RZ/G2L"
select ARCH_RZG2L
@@ -308,6 +323,13 @@ config ARCH_R9A07G054
help
This enables support for the Renesas RZ/V2L SoC variants.
+config ARCH_R9A09G011
+ bool "ARM64 Platform support for RZ/V2M"
+ select PM
+ select PM_GENERIC_DOMAINS
+ help
+ This enables support for the Renesas RZ/V2M SoC.
+
endif # ARM64
config RST_RCAR
@@ -379,6 +401,10 @@ config SYSC_R8A779A0
bool "System Controller support for R-Car V3U" if COMPILE_TEST
select SYSC_RCAR_GEN4
+config SYSC_R8A779G0
+ bool "System Controller support for R-Car V4H" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
config SYSC_RMOBILE
bool "System Controller support for R-Mobile" if COMPILE_TEST
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index deeb41f84f01..535868c9c7e4 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
+obj-$(CONFIG_SYSC_R8A779G0) += r8a779g0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
diff --git a/drivers/soc/renesas/r8a779g0-sysc.c b/drivers/soc/renesas/r8a779g0-sysc.c
new file mode 100644
index 000000000000..a452709f066d
--- /dev/null
+++ b/drivers/soc/renesas/r8a779g0-sysc.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V4H System Controller
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779g0-sysc.h>
+
+#include "rcar-gen4-sysc.h"
+
+static struct rcar_gen4_sysc_area r8a779g0_areas[] __initdata = {
+ { "always-on", R8A779G0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779G0_PD_A3E0, R8A779G0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779G0_PD_A2E0D0, R8A779G0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779G0_PD_A2E0D1, R8A779G0_PD_A3E0, PD_SCU },
+ { "a1e0d0c0", R8A779G0_PD_A1E0D0C0, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779G0_PD_A1E0D0C1, R8A779G0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779G0_PD_A1E0D1C0, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779G0_PD_A1E0D1C1, R8A779G0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a33dga", R8A779G0_PD_A33DGA, R8A779G0_PD_ALWAYS_ON },
+ { "a23dgb", R8A779G0_PD_A23DGB, R8A779G0_PD_A33DGA },
+ { "a3vip0", R8A779G0_PD_A3VIP0, R8A779G0_PD_ALWAYS_ON },
+ { "a3vip1", R8A779G0_PD_A3VIP1, R8A779G0_PD_ALWAYS_ON },
+ { "a3vip2", R8A779G0_PD_A3VIP2, R8A779G0_PD_ALWAYS_ON },
+ { "a3isp0", R8A779G0_PD_A3ISP0, R8A779G0_PD_ALWAYS_ON },
+ { "a3isp1", R8A779G0_PD_A3ISP1, R8A779G0_PD_ALWAYS_ON },
+ { "a3ir", R8A779G0_PD_A3IR, R8A779G0_PD_ALWAYS_ON },
+ { "a2cn0", R8A779G0_PD_A2CN0, R8A779G0_PD_A3IR },
+ { "a1cnn0", R8A779G0_PD_A1CNN0, R8A779G0_PD_A2CN0 },
+ { "a1dsp0", R8A779G0_PD_A1DSP0, R8A779G0_PD_A2CN0 },
+ { "a1dsp1", R8A779G0_PD_A1DSP1, R8A779G0_PD_A2CN0 },
+ { "a1dsp2", R8A779G0_PD_A1DSP2, R8A779G0_PD_A2CN0 },
+ { "a1dsp3", R8A779G0_PD_A1DSP3, R8A779G0_PD_A2CN0 },
+ { "a2imp01", R8A779G0_PD_A2IMP01, R8A779G0_PD_A3IR },
+ { "a2imp23", R8A779G0_PD_A2IMP23, R8A779G0_PD_A3IR },
+ { "a2psc", R8A779G0_PD_A2PSC, R8A779G0_PD_A3IR },
+ { "a2dma", R8A779G0_PD_A2DMA, R8A779G0_PD_A3IR },
+ { "a2cv0", R8A779G0_PD_A2CV0, R8A779G0_PD_A3IR },
+ { "a2cv1", R8A779G0_PD_A2CV1, R8A779G0_PD_A3IR },
+ { "a2cv2", R8A779G0_PD_A2CV2, R8A779G0_PD_A3IR },
+ { "a2cv3", R8A779G0_PD_A2CV3, R8A779G0_PD_A3IR },
+};
+
+const struct rcar_gen4_sysc_info r8a779g0_sysc_info __initconst = {
+ .areas = r8a779g0_areas,
+ .num_areas = ARRAY_SIZE(r8a779g0_areas),
+};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.c b/drivers/soc/renesas/rcar-gen4-sysc.c
index 831162a57f9a..9e5e6e077abc 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.c
+++ b/drivers/soc/renesas/rcar-gen4-sysc.c
@@ -282,6 +282,9 @@ static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779F0
{ .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A779G0
+ { .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
index 0e0bd102b1f9..fe2d98254754 100644
--- a/drivers/soc/renesas/rcar-gen4-sysc.h
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -39,5 +39,6 @@ struct rcar_gen4_sysc_info {
extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
+extern const struct rcar_gen4_sysc_info r8a779g0_sysc_info;
#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 4d293eb2d8f3..e1c7e91f5a86 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -103,6 +103,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
/* R-Car Gen4 */
{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 },
{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 92c7b42250ee..d171f1b635c7 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -64,6 +64,10 @@ static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
.name = "RZ/G2L",
};
+static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
+ .name = "RZ/G2UL",
+};
+
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -148,6 +152,11 @@ static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
.id = 0x841c447,
};
+static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
+ .family = &fam_rzg2ul,
+ .id = 0x8450447,
+};
+
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -223,7 +232,7 @@ static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
};
static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
- .family = &fam_rcar_gen3,
+ .family = &fam_rcar_gen4,
.id = 0x59,
};
@@ -232,6 +241,11 @@ static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
.id = 0x5a,
};
+static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5c,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -340,6 +354,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779F0
{ .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
#endif
+#ifdef CONFIG_ARCH_R8A779G0
+ { .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
+#endif
+#if defined(CONFIG_ARCH_R9A07G043)
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
@@ -378,6 +398,7 @@ static const struct renesas_id id_prr __initconst = {
static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,bsid", .data = &id_bsid },
+ { .compatible = "renesas,r9a07g043-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,prr", .data = &id_prr },
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index 156ac0e0c8fe..aff2f7e95237 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -23,23 +23,23 @@ config ROCKCHIP_IODOMAIN
voltage supplied by the regulators.
config ROCKCHIP_PM_DOMAINS
- bool "Rockchip generic power domain"
- depends on PM
- select PM_GENERIC_DOMAINS
- help
- Say y here to enable power domain support.
- In order to meet high performance and low power requirements, a power
- management unit is designed or saving power when RK3288 in low power
- mode. The RK3288 PMU is dedicated for managing the power of the whole chip.
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+ In order to meet high performance and low power requirements, a power
+ management unit is designed for saving power when the RK3288 is in low
+ power mode. The RK3288 PMU is dedicated to managing the power of the whole chip.
- If unsure, say N.
+ If unsure, say N.
config ROCKCHIP_DTPM
tristate "Rockchip DTPM hierarchy"
depends on DTPM && m
help
- Describe the hierarchy for the Dynamic Thermal Power
- Management tree on this platform. That will create all the
- power capping capable devices.
+ Describe the hierarchy for the Dynamic Thermal Power Management tree
+ on this platform. That will create all the power capping capable
+ devices.
endif
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 494cf2b5bf7b..384461b70684 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -108,6 +108,20 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
.num_values = ARRAY_SIZE(rk3399_defaults),
};
+#define RK3566_GRF_USB3OTG0_CON1 0x0104
+
+static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+};
+
+static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
+ .values = rk3566_defaults,
+ .num_values = ARRAY_SIZE(rk3566_defaults),
+};
+
+
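The rk3566 defaults rely on the grf driver's hiword-mask convention, where the upper 16 bits of the written value select which of the lower 16 bits actually take effect, so individual bits can be flipped without a read-modify-write. A standalone sketch, assuming the usual HIWORD_UPDATE(val, mask, shift) encoding of (val) << (shift) | (mask) << ((shift) + 16):

#include <stdio.h>

/* Assumed encoding; check the macro definition in grf.c before relying on it. */
#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

int main(void)
{
	/* "usb3otg disable usb3": set bit 0, write-enable only bit 0. */
	printf("0x%08x\n", HIWORD_UPDATE(1, 1, 0));	/* 0x00010001 */
	/* "usb3otg port switch": clear bit 12, write-enable only bit 12. */
	printf("0x%08x\n", HIWORD_UPDATE(0, 1, 12));	/* 0x10000000 */
	return 0;
}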
static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
{
.compatible = "rockchip,rk3036-grf",
@@ -130,6 +144,9 @@ static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
}, {
.compatible = "rockchip,rk3399-grf",
.data = (void *)&rk3399_grf,
+ }, {
+ .compatible = "rockchip,rk3566-pipe-grf",
+ .data = (void *)&rk3566_pipegrf,
},
{ /* sentinel */ },
};
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 0868b7d406fb..89795abac951 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
+#include <linux/mutex.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
@@ -16,6 +17,7 @@
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <soc/rockchip/pm_domains.h>
#include <dt-bindings/power/px30-power.h>
#include <dt-bindings/power/rk3036-power.h>
#include <dt-bindings/power/rk3066-power.h>
@@ -139,6 +141,109 @@ struct rockchip_pmu {
#define DOMAIN_RK3568(name, pwr, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
+/*
+ * Dynamic Memory Controller may need to coordinate with us -- see
+ * rockchip_pmu_block().
+ *
+ * dmc_pmu_mutex protects against registration-time races, so the DMC driver
+ * doesn't try to block() while we're initializing the PMU.
+ */
+static DEFINE_MUTEX(dmc_pmu_mutex);
+static struct rockchip_pmu *dmc_pmu;
+
+/*
+ * Block PMU transitions and make sure they don't interfere with ARM Trusted
+ * Firmware operations. There are two conflicts, noted in the comments below.
+ *
+ * Caller must unblock PMU transitions via rockchip_pmu_unblock().
+ */
+int rockchip_pmu_block(void)
+{
+ struct rockchip_pmu *pmu;
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+ int i, ret;
+
+ mutex_lock(&dmc_pmu_mutex);
+
+ /* No PMU (yet)? Then we just block rockchip_pmu_probe(). */
+ if (!dmc_pmu)
+ return 0;
+ pmu = dmc_pmu;
+
+ /*
+ * mutex blocks all idle transitions: we can't touch the
+ * PMU_BUS_IDLE_REQ (our ".idle_offset") register while ARM Trusted
+ * Firmware might be using it.
+ */
+ mutex_lock(&pmu->mutex);
+
+ /*
+ * Power domain clocks: Per Rockchip, we *must* keep certain clocks
+ * enabled for the duration of power-domain transitions. Most
+ * transitions are handled by this driver, but some cases (in
+ * particular, DRAM DVFS / memory-controller idle) must be handled by
+ * firmware. Firmware can handle most clock management via a special
+ * "ungate" register (PMU_CRU_GATEDIS_CON0), but unfortunately, this
+ * doesn't handle PLLs. We can assist this transition by doing the
+ * clock management on behalf of firmware.
+ */
+ for (i = 0; i < pmu->genpd_data.num_domains; i++) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret < 0) {
+ dev_err(pmu->dev,
+ "failed to enable clks for domain '%s': %d\n",
+ genpd->name, ret);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ }
+ }
+ mutex_unlock(&pmu->mutex);
+ mutex_unlock(&dmc_pmu_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rockchip_pmu_block);
+
+/* Unblock PMU transitions. */
+void rockchip_pmu_unblock(void)
+{
+ struct rockchip_pmu *pmu;
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+ int i;
+
+ if (dmc_pmu) {
+ pmu = dmc_pmu;
+ for (i = 0; i < pmu->genpd_data.num_domains; i++) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ }
+ }
+
+ mutex_unlock(&pmu->mutex);
+ }
+
+ mutex_unlock(&dmc_pmu_mutex);
+}
+EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
+
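The intended caller of this pair is the Rockchip DMC/DVFS path, which brackets a firmware-driven memory frequency switch so that no power-domain idle requests race with ARM Trusted Firmware. A hedged sketch of such a caller, using only the two exported functions from this patch; everything else is a hypothetical illustration:

#include <linux/clk.h>
#include <soc/rockchip/pm_domains.h>

static int example_dmc_set_rate(struct clk *dmc_clk, unsigned long rate)
{
	int ret;

	ret = rockchip_pmu_block();	/* also keeps the domain clocks enabled */
	if (ret)
		return ret;

	ret = clk_set_rate(dmc_clk, rate);	/* firmware performs the actual switch */

	rockchip_pmu_unblock();
	return ret;
}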
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
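For context, a minimal usage sketch (not part of this patch) of how a DMC/devfreq driver is expected to bracket a firmware-assisted DRAM rate change with the two helpers added above; the function name and the firmware call site are hypothetical:

#include <soc/rockchip/pm_domains.h>

static int example_dmc_set_dram_rate(unsigned long rate)
{
	int ret;

	/* Hold off idle requests and keep the domain clocks enabled. */
	ret = rockchip_pmu_block();
	if (ret)
		return ret;

	/* ... SMC into ARM Trusted Firmware to switch the DRAM rate ... */

	rockchip_pmu_unblock();
	return 0;
}
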
@@ -178,7 +283,7 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
regmap_update_bits(pmu->regmap, pmu->info->req_offset,
pd_info->req_mask, idle ? -1U : 0);
- dsb(sy);
+ wmb();
	/* Wait until idle_ack = 1 */
target_ack = idle ? pd_info->ack_mask : 0;
@@ -285,7 +390,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
- dsb(sy);
+ wmb();
if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
is_on == on, 0, 10000)) {
@@ -690,6 +795,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
error = -ENODEV;
+ /*
+ * Prevent any rockchip_pmu_block() from racing with the remainder of
+ * setup (clocks, register initialization).
+ */
+ mutex_lock(&dmc_pmu_mutex);
+
for_each_available_child_of_node(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
@@ -719,10 +830,17 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
goto err_out;
}
+ /* We only expect one PMU. */
+ if (!WARN_ON_ONCE(dmc_pmu))
+ dmc_pmu = pmu;
+
+ mutex_unlock(&dmc_pmu_mutex);
+
return 0;
err_out:
rockchip_pm_domain_cleanup(pmu);
+ mutex_unlock(&dmc_pmu_mutex);
return error;
}
@@ -1068,9 +1186,9 @@ static struct platform_driver rockchip_pm_domain_driver = {
.name = "rockchip-pm-domain",
.of_match_table = rockchip_pm_domain_dt_match,
/*
- * We can't forcibly eject devices form power domain,
- * so we can't really remove power domains once they
- * were added.
+ * We can't forcibly eject devices from the power
+ * domain, so we can't really remove power domains
+ * once they were added.
*/
.suppress_bind_attrs = true,
},
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 8b53ed1cc67e..5725c8ef0406 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -146,6 +146,7 @@ config SOC_TEGRA_PMC
select GENERIC_PINCONF
select PM_OPP
select PM_GENERIC_DOMAINS
+ select REGMAP
config SOC_TEGRA_POWERGATE_BPMP
def_bool y
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index aa94fda282f4..b0a8405dbdb1 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2013-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk.h>
@@ -162,7 +162,7 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "gcplex-config-fuse",
+ .name = "gpu-gcplex-config-fuse",
.offset = 0x1c8,
.bytes = 4,
.bit_offset = 0,
@@ -186,13 +186,13 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "pdi0",
+ .name = "gpu-pdi0",
.offset = 0x300,
.bytes = 4,
.bit_offset = 0,
.nbits = 32,
}, {
- .name = "pdi1",
+ .name = "gpu-pdi1",
.offset = 0x304,
.bytes = 4,
.bit_offset = 0,
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index b071d433d74f..f01d8a2547b6 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/device.h>
@@ -344,6 +344,21 @@ static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
.cell_name = "xusb-pad-calibration-ext",
.dev_id = "3520000.padctl",
.con_id = "calibration-ext",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-gcplex-config-fuse",
+ .dev_id = "17000000.gpu",
+ .con_id = "gcplex-config-fuse",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi0",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi1",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi1",
},
};
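For reference, a sketch of how the renamed cells are meant to be consumed through these lookups: the GPU driver (dev_id "17000000.gpu" above) asks for the con_id, not the new cell name. Error handling is abbreviated and the surrounding function is hypothetical:

static int example_read_gcplex_config(struct device *dev, u32 *out)
{
	struct nvmem_cell *cell;
	size_t len;
	u32 *value;

	cell = nvmem_cell_get(dev, "gcplex-config-fuse");	/* con_id from the lookup */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	value = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	*out = *value;	/* 32-bit fuse word, per .bytes = 4 above */
	kfree(value);
	return 0;
}
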
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index fdf508e03400..5611d14d3ba2 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
+#include <linux/power_supply.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -108,6 +109,7 @@
#define PMC_USB_DEBOUNCE_DEL 0xec
#define PMC_USB_AO 0xf0
+#define PMC_SCRATCH37 0x130
#define PMC_SCRATCH41 0x140
#define PMC_WAKE2_MASK 0x160
@@ -394,6 +396,8 @@ struct tegra_pmc_soc {
* @domain: IRQ domain provided by the PMC
* @irq: chip implementation for the IRQ domain
* @clk_nb: pclk clock changes handler
+ * @core_domain_state_synced: flag marking the core domain's state as synced
+ * @core_domain_registered: flag marking the core domain as registered
*/
struct tegra_pmc {
struct device *dev;
@@ -1099,8 +1103,7 @@ static struct notifier_block tegra_pmc_reboot_notifier = {
.notifier_call = tegra_pmc_reboot_notify,
};
-static int tegra_pmc_restart_notify(struct notifier_block *this,
- unsigned long action, void *data)
+static void tegra_pmc_restart(void)
{
u32 value;
@@ -1108,14 +1111,31 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_MAIN_RST;
tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+
+static int tegra_pmc_restart_handler(struct sys_off_data *data)
+{
+ tegra_pmc_restart();
return NOTIFY_DONE;
}
-static struct notifier_block tegra_pmc_restart_handler = {
- .notifier_call = tegra_pmc_restart_notify,
- .priority = 128,
-};
+static int tegra_pmc_power_off_handler(struct sys_off_data *data)
+{
+ /*
+ * Reboot Nexus 7 into special bootloader mode if USB cable is
+ * connected in order to display battery status and power off.
+ */
+ if (of_machine_is_compatible("asus,grouper") &&
+ power_supply_is_system_supplied()) {
+ const u32 go_to_charger_mode = 0xa5a55a5a;
+
+ tegra_pmc_writel(pmc, go_to_charger_mode, PMC_SCRATCH37);
+ tegra_pmc_restart();
+ }
+
+ return NOTIFY_DONE;
+}
static int powergate_show(struct seq_file *s, void *data)
{
@@ -2878,6 +2898,42 @@ static int tegra_pmc_probe(struct platform_device *pdev)
}
/*
+	 * The PMC should be the last resort for restarting since it
+	 * soft-resets the CPU without resetting everything else.
+	 */
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
+
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_LOW,
+ tegra_pmc_restart_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
+	/*
+	 * The PMC should be the primary power-off method if it soft-resets
+	 * the CPU, asking the bootloader to shut down the hardware.
+	 */
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ tegra_pmc_power_off_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
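As a general illustration of the sys-off API this hunk switches to (replacing the old notifier-based restart handler), here is a minimal sketch with a hypothetical driver context; only devm_register_sys_off_handler(), struct sys_off_data and the SYS_OFF_* constants come from the real API:

struct example_ctx {
	void __iomem *base;	/* hypothetical MMIO region */
};

static int example_restart_handler(struct sys_off_data *data)
{
	struct example_ctx *ctx = data->cb_data;	/* last registration argument */

	writel(0x1, ctx->base);	/* hypothetical "reset" register write */
	return NOTIFY_DONE;
}

static int example_register(struct device *dev, struct example_ctx *ctx)
{
	/* Low priority: advertise this device as a last-resort restart method. */
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_LOW,
					     example_restart_handler, ctx);
}
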
+ /*
* PCLK clock rate can't be retrieved using CLK API because it
* causes lockup if CPU enters LP2 idle state from some other
* CLK notifier, hence we're caching the rate's value locally.
@@ -2908,28 +2964,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
goto cleanup_sysfs;
}
- err = devm_register_reboot_notifier(&pdev->dev,
- &tegra_pmc_reboot_notifier);
- if (err) {
- dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
- err);
- goto cleanup_debugfs;
- }
-
- err = register_restart_handler(&tegra_pmc_restart_handler);
- if (err) {
- dev_err(&pdev->dev, "unable to register restart handler, %d\n",
- err);
- goto cleanup_debugfs;
- }
-
err = tegra_pmc_pinctrl_init(pmc);
if (err)
- goto cleanup_restart_handler;
+ goto cleanup_debugfs;
err = tegra_pmc_regmap_init(pmc);
if (err < 0)
- goto cleanup_restart_handler;
+ goto cleanup_debugfs;
err = tegra_powergate_init(pmc, pdev->dev.of_node);
if (err < 0)
@@ -2952,8 +2993,6 @@ static int tegra_pmc_probe(struct platform_device *pdev)
cleanup_powergates:
tegra_powergate_remove_all(pdev->dev.of_node);
-cleanup_restart_handler:
- unregister_restart_handler(&tegra_pmc_restart_handler);
cleanup_debugfs:
debugfs_remove(pmc->debugfs);
cleanup_sysfs:
@@ -3766,7 +3805,7 @@ static const struct tegra_pmc_regs tegra234_pmc_regs = {
};
static const char * const tegra234_reset_sources[] = {
- "SYS_RESET_N",
+ "SYS_RESET_N", /* 0x0 */
"AOWDT",
"BCCPLEXWDT",
"BPMPWDT",
@@ -3774,19 +3813,36 @@ static const char * const tegra234_reset_sources[] = {
"SPEWDT",
"APEWDT",
"LCCPLEXWDT",
- "SENSOR",
- "AOTAG",
- "VFSENSOR",
+ "SENSOR", /* 0x8 */
+ NULL,
+ NULL,
"MAINSWRST",
"SC7",
"HSM",
- "CSITE",
+ NULL,
"RCEWDT",
- "PVA0WDT",
- "PVA1WDT",
- "L1A_ASYNC",
+ NULL, /* 0x10 */
+ NULL,
+ NULL,
"BPMPBOOT",
"FUSECRC",
+ "DCEWDT",
+ "PSCWDT",
+ "PSC",
+ "CSITE_SW", /* 0x18 */
+ "POD",
+ "SCPM",
+ "VREFRO_POWERBAD",
+ "VMON",
+ "FMON",
+ "FSI_R5WDT",
+ "FSI_THERM",
+ "FSI_R52C0WDT", /* 0x20 */
+ "FSI_R52C1WDT",
+ "FSI_R52C2WDT",
+ "FSI_R52C3WDT",
+ "FSI_FMON",
+ "FSI_VMON", /* 0x25 */
};
static const struct tegra_wake_event tegra234_wake_events[] = {
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 700d8eecd8c4..d756591de973 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -415,9 +415,8 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
{
- struct knav_dma_chan *chan;
- struct knav_dma_device *dma;
- bool found = false;
+ struct knav_dma_device *dma = NULL, *iter1;
+ struct knav_dma_chan *chan = NULL, *iter2;
int chan_num = -1;
const char *instance;
@@ -444,33 +443,32 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
/* Look for correct dma instance */
- list_for_each_entry(dma, &kdev->list, list) {
- if (!strcmp(dma->name, instance)) {
- found = true;
+ list_for_each_entry(iter1, &kdev->list, list) {
+ if (!strcmp(iter1->name, instance)) {
+ dma = iter1;
break;
}
}
- if (!found) {
+ if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
return (void *)-EINVAL;
}
/* Look for correct dma channel from dma instance */
- found = false;
- list_for_each_entry(chan, &dma->chan_list, list) {
+ list_for_each_entry(iter2, &dma->chan_list, list) {
if (config->direction == DMA_MEM_TO_DEV) {
- if (chan->channel == chan_num) {
- found = true;
+ if (iter2->channel == chan_num) {
+ chan = iter2;
break;
}
} else {
- if (chan->flow == chan_num) {
- found = true;
+ if (iter2->flow == chan_num) {
+ chan = iter2;
break;
}
}
}
- if (!found) {
+ if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
return (void *)-EINVAL;
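The rework above follows the tree-wide rule of never using the list_for_each_entry() cursor after the loop (it does not point to a valid element when no match was found); in general form, with made-up types:

struct example_item {
	struct list_head node;
	int id;
};

static struct example_item *example_find(struct list_head *items, int wanted_id)
{
	struct example_item *found = NULL, *iter;

	list_for_each_entry(iter, items, node) {
		if (iter->id == wanted_id) {
			found = iter;	/* remember the match here... */
			break;
		}
	}

	/* ...and only test/use 'found' afterwards, never 'iter'. */
	return found;
}
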
@@ -747,9 +745,8 @@ static int knav_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->list);
pm_runtime_enable(kdev->dev);
- ret = pm_runtime_get_sync(kdev->dev);
+ ret = pm_runtime_resume_and_get(kdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
goto err_pm_disable;
}
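The pm_runtime conversions in this and the following TI drivers are mechanical; pm_runtime_resume_and_get() bundles the error path that previously had to be open-coded. A side-by-side sketch, where dev is any runtime-PM-enabled struct device:

static int example_resume_old(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* get_sync() raises the usage count even on failure... */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}

static int example_resume_new(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	/* ...while resume_and_get() drops it again itself on failure. */
	return ret < 0 ? ret : 0;
}
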
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 2ac3856b8d42..92af7d1b6f5b 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -758,10 +758,9 @@ void *knav_pool_create(const char *name,
int num_desc, int region_id)
{
struct knav_region *reg_itr, *region = NULL;
- struct knav_pool *pool, *pi;
+ struct knav_pool *pool, *pi = NULL, *iter;
struct list_head *node;
unsigned last_offset;
- bool slot_found;
int ret;
if (!kdev)
@@ -790,7 +789,7 @@ void *knav_pool_create(const char *name,
}
pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
- if (IS_ERR_OR_NULL(pool->queue)) {
+ if (IS_ERR(pool->queue)) {
dev_err(kdev->dev,
"failed to open queue for pool(%s), error %ld\n",
name, PTR_ERR(pool->queue));
@@ -816,18 +815,17 @@ void *knav_pool_create(const char *name,
* the request
*/
last_offset = 0;
- slot_found = false;
node = &region->pools;
- list_for_each_entry(pi, &region->pools, region_inst) {
- if ((pi->region_offset - last_offset) >= num_desc) {
- slot_found = true;
+ list_for_each_entry(iter, &region->pools, region_inst) {
+ if ((iter->region_offset - last_offset) >= num_desc) {
+ pi = iter;
break;
}
- last_offset = pi->region_offset + pi->num_desc;
+ last_offset = iter->region_offset + iter->num_desc;
}
- node = &pi->region_inst;
- if (slot_found) {
+ if (pi) {
+ node = &pi->region_inst;
pool->region = region;
pool->num_desc = num_desc;
pool->region_offset = last_offset;
@@ -1785,9 +1783,8 @@ static int knav_queue_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&kdev->pdsps);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index f32e1cbbe8c5..913b964374a4 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -941,23 +941,20 @@ static int omap_prm_probe(struct platform_device *pdev)
struct resource *res;
const struct omap_prm_data *data;
struct omap_prm *prm;
- const struct of_device_id *match;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- match = of_match_device(omap_prm_id_table, &pdev->dev);
- if (!match)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENOTSUPP;
prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
if (!prm)
return -ENOMEM;
- data = match->data;
-
while (data->base != res->start) {
if (!data->base)
return -EINVAL;
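The of_device_get_match_data() conversion above is another common pattern; in sketch form, with a hypothetical driver and compatible string:

struct example_match_data {
	u32 base;
};

static const struct example_match_data example_data = { .base = 0x4ae06000 };

static const struct of_device_id example_id_table[] = {
	{ .compatible = "vendor,example-prm", .data = &example_data },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *data;

	/* Replaces of_match_device() followed by dereferencing match->data. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	return 0;
}
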
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index 7bab4bbaf02d..ce09c42eaed2 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -555,11 +555,9 @@ static int am33xx_pm_probe(struct platform_device *pdev)
#endif /* CONFIG_SUSPEND */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto err_pm_runtime_disable;
- }
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index b36779309e49..0e4ba0f89533 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -279,10 +279,9 @@ static int pruss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pruss);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "couldn't enable module\n");
- pm_runtime_put_noidle(dev);
goto rpm_disable;
}
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8afb3f45d263..a33ec7eaf23d 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
devm_kcalloc(dev, max_id + 1,
sizeof(*pd_provider->data.domains),
GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
pd_provider->data.num_domains = max_id + 1;
pd_provider->data.xlate = ti_sci_pd_xlate;
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 2f03ced0f411..0076d467ff6b 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
* Dave Gerlach <d-gerlach@ti.com>
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
@@ -40,12 +42,30 @@
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
static struct wkup_m3_ipc *m3_ipc_state;
static const struct wkup_m3_wakeup_src wakeups[] = {
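Taken together, the IPC_*_SHIFT/MASK values added above define the layout of IPC register 4 as later composed in wkup_m3_prepare_low_power(); summarized here for reference from those definitions:

/*
 *   bits  0-2 : memory type in use     (IPC_MEM_TYPE_*)
 *   bit     3 : VTT regulator toggle   (IPC_VTT_STAT_*)
 *   bits  4-9 : VTT GPIO pin number    (IPC_VTT_GPIO_PIN_*)
 *   bit    10 : IO isolation enable    (IPC_IO_ISOLATION_STAT_*)
 *   bit    11 : late debug halt        (IPC_DBG_HALT_*)
 */
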
@@ -66,6 +86,148 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
{.irq_nr = 0, .src = "Unknown"},
};
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to a special region of M3 DMEM
+ * @m3_ipc: wkup_m3 IPC state
+ * @data: pointer to the data to copy
+ * @sz: size of the data to copy (limited to 256 bytes)
+ *
+ * Copies an additional blob of data into the wkup_m3 DMEM for use by the
+ * firmware and returns the offset of that data within DMEM.
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE,
+ NULL);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+	/*
+	 * If no firmware name was provided, the user has already been
+	 * warned and PM will still work, so return 0.
+	 */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+ if (!m3_ipc->dbg_path)
+ return -EINVAL;
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
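For completeness, a usage sketch for the new debugfs knob; the path follows the debugfs_create_dir()/debugfs_create_file() calls above and assumes debugfs is mounted at its conventional location:

/*
 *   echo 1 > /sys/kernel/debug/wkup_m3_ipc/enable_late_halt
 *   cat /sys/kernel/debug/wkup_m3_ipc/enable_late_halt
 */
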
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +292,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
}
m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
complete(&m3_ipc->sync_complete);
break;
case M3_STATE_MSG_FOR_RESET:
@@ -215,6 +378,17 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
(m3_ipc->state != M3_STATE_UNKNOWN));
}
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
@@ -280,12 +454,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
switch (state) {
case WKUP_M3_DEEPSLEEP:
m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
break;
case WKUP_M3_STANDBY:
m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
case WKUP_M3_IDLE:
m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
break;
default:
return 1;
@@ -294,11 +471,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/* Program each required IPC register then write defaults to others */
wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
- wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
- wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
@@ -433,12 +612,13 @@ static int wkup_m3_rproc_boot_thread(void *arg)
static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int irq, ret;
+ int irq, ret, temp;
phandle rproc_phandle;
struct rproc *m3_rproc;
struct resource *res;
struct task_struct *task;
struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
if (!m3_ipc)
@@ -450,10 +630,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
return PTR_ERR(m3_ipc->ipc_mem_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
0, "wkup_m3_txev", m3_ipc);
@@ -496,6 +674,22 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->ops = &ipc_ops;
+ if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "firmware-name",
+ &m3_ipc->sd_fw_name);
+ if (ret) {
+ dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+	}
+
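The three lookups above map to optional device-tree properties; a hedged fragment showing how a board file might exercise them (the property names come from the parsing code, while the values and firmware file name are examples only):

/*
 *	&wkup_m3_ipc {
 *		ti,vtt-gpio-pin = <7>;
 *		ti,set-io-isolation;
 *		firmware-name = "am335x-example-scale-data.bin";
 *	};
 */
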
/*
* Wait for firmware loading completion in a thread so we
* can boot the wkup_m3 as soon as it's ready without holding
@@ -510,6 +704,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
return 0;
err_put_rproc:
@@ -521,6 +717,8 @@ err_free_mbox:
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
mbox_free_channel(m3_ipc_state->mbox);
rproc_shutdown(m3_ipc_state->rproc);