Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig | 80
-rw-r--r--  drivers/bus/Makefile | 11
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 129
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 109
-rw-r--r--  drivers/bus/bt1-apb.c | 419
-rw-r--r--  drivers/bus/bt1-axi.c | 312
-rw-r--r--  drivers/bus/fsl-mc/Kconfig | 7
-rw-r--r--  drivers/bus/fsl-mc/Makefile | 6
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 325
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 147
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c | 35
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 465
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-msi.c | 115
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 128
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c | 597
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 15
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c | 25
-rw-r--r--  drivers/bus/fsl-mc/obj-api.c | 103
-rw-r--r--  drivers/bus/hisi_lpc.c | 261
-rw-r--r--  drivers/bus/imx-weim.c | 152
-rw-r--r--  drivers/bus/intel-ixp4xx-eb.c | 429
-rw-r--r--  drivers/bus/mhi/Kconfig | 9
-rw-r--r--  drivers/bus/mhi/Makefile | 5
-rw-r--r--  drivers/bus/mhi/common.h | 326
-rw-r--r--  drivers/bus/mhi/ep/Kconfig | 10
-rw-r--r--  drivers/bus/mhi/ep/Makefile | 2
-rw-r--r--  drivers/bus/mhi/ep/internal.h | 218
-rw-r--r--  drivers/bus/mhi/ep/main.c | 1598
-rw-r--r--  drivers/bus/mhi/ep/mmio.c | 273
-rw-r--r--  drivers/bus/mhi/ep/ring.c | 207
-rw-r--r--  drivers/bus/mhi/ep/sm.c | 148
-rw-r--r--  drivers/bus/mhi/host/Kconfig | 31
-rw-r--r--  drivers/bus/mhi/host/Makefile | 6
-rw-r--r--  drivers/bus/mhi/host/boot.c | 536
-rw-r--r--  drivers/bus/mhi/host/debugfs.c | 413
-rw-r--r--  drivers/bus/mhi/host/init.c | 1452
-rw-r--r--  drivers/bus/mhi/host/internal.h | 383
-rw-r--r--  drivers/bus/mhi/host/main.c | 1696
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 1226
-rw-r--r--  drivers/bus/mhi/host/pm.c | 1277
-rw-r--r--  drivers/bus/mips_cdmm.c | 20
-rw-r--r--  drivers/bus/moxtet.c | 8
-rw-r--r--  drivers/bus/mvebu-mbus.c | 56
-rw-r--r--  drivers/bus/omap_l3_noc.c | 14
-rw-r--r--  drivers/bus/omap_l3_noc.h | 10
-rw-r--r--  drivers/bus/qcom-ebi2.c | 8
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c | 389
-rw-r--r--  drivers/bus/simple-pm-bus.c | 43
-rw-r--r--  drivers/bus/sun50i-de2.c | 7
-rw-r--r--  drivers/bus/sunxi-rsb.c | 225
-rw-r--r--  drivers/bus/tegra-gmi.c | 55
-rw-r--r--  drivers/bus/ti-sysc.c | 1237
-rw-r--r--  drivers/bus/ts-nbus.c | 5
-rw-r--r--  drivers/bus/vexpress-config.c | 354
54 files changed, 15225 insertions, 892 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6095b6df8a81..7bfe998f3514 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,8 +20,17 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform)
+
config BRCMSTB_GISB_ARB
- bool "Broadcom STB GISB bus arbiter"
+ tristate "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
default ARCH_BRCMSTB || BMIPS_GENERIC
help
@@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. In case of any APB protocol collisions, slave device
+ not responding on timeout an IRQ is raised with an erroneous address
+ reported to the APB terminator (APB Errors Handler Block). This
+ driver provides the interrupt handler to detect the erroneous
+ address, prints an error message about the address fault, updates an
+ errors counter. The counter and the APB-bus operations timeout can be
+ accessed via corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ AXI3-bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with RAM controller and with MIPS P5600 cores on
+ Baikal-T1 SoC. Traffic arbitration is done by means of DW AMBA 3 AXI
+ Interconnect (so called AXI Main Interconnect) routing IO requests
+ from one SoC block to another. This driver provides a way to detect
+ any bus protocol errors and device not responding situations by
+ means of an embedded on top of the interconnect errors handler
+ block (EHB). AXI Interconnect QoS arbitration tuning is currently
+ unsupported.
+
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
@@ -41,7 +80,7 @@ config MOXTET
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
- depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC && !C6X)
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
depends on HAS_IOMEM
select INDIRECT_PIO if ARM64
help
@@ -56,9 +95,20 @@ config IMX_WEIM
The WEIM(Wireless External Interface Module) works like a bus.
You can attach many different devices on it, such as NOR, onenand.
+config INTEL_IXP4XX_EB
+ bool "Intel IXP4xx expansion bus interface driver"
+ depends on HAS_IOMEM
+ depends on ARCH_IXP4XX || COMPILE_TEST
+ default ARCH_IXP4XX
+ select MFD_SYSCON
+ help
+ Driver for the Intel IXP4xx expansion bus interface. The driver is
+ needed to set up various chip select configuration parameters before
+ devices on the expansion bus can be discovered.
+
config MIPS_CDMM
bool "MIPS Common Device Memory Map (CDMM) Driver"
- depends on CPU_MIPSR2
+ depends on CPU_MIPSR2 || CPU_MIPSR5
help
Driver needed for the MIPS Common Device Memory Map bus in MIPS
cores. This bus is for per-CPU tightly coupled devices such as the
@@ -102,17 +152,16 @@ config QCOM_EBI2
Interface 2, which can be used to connect things like NAND Flash,
SRAM, ethernet adapters, FPGAs and LCD displays.
-config SIMPLE_PM_BUS
- tristate "Simple Power-Managed Bus Driver"
- depends on OF && PM
- help
- Driver for transparent busses that don't need a real driver, but
- where the bus controller is part of a PM domain, or under the control
- of a functional clock, and thus relies on runtime PM for managing
- this PM domain and/or clock.
- An example of such a bus controller is the Renesas Bus State
- Controller (BSC, sometimes called "LBSC within Bus Bridge", or
- "External Bus Interface") as found on several Renesas ARM SoCs.
+config QCOM_SSC_BLOCK_BUS
+ bool "Qualcomm SSC Block Bus Init Driver"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable support for initializing the bus that connects
+ the SSC block's internal bus to the cNoC (configuration NoC) on
+ (some) qcom SoCs.
+ The SSC (Snapdragon Sensor Core) block contains a gpio controller,
+ i2c/spi/uart controllers, a hexagon core, and a clock controller
+ which provides clocks for the above.
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
@@ -183,7 +232,7 @@ config UNIPHIER_SYSTEM_BUS
needed to use on-board devices connected to UniPhier SoCs.
config VEXPRESS_CONFIG
- bool "Versatile Express configuration bus"
+ tristate "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
depends on ARM || ARM64
depends on OF
@@ -201,5 +250,6 @@ config DA8XX_MSTPRI
peripherals.
source "drivers/bus/fsl-mc/Kconfig"
+source "drivers/bus/mhi/Kconfig"
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1320bcf9fa9d..d90eed189a65 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -5,7 +5,7 @@
# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
-
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
@@ -13,7 +13,10 @@ obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_INTEL_IXP4XX_EB) += intel-ixp4xx-eb.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
@@ -22,9 +25,10 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
+obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
-obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_OF) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
@@ -34,3 +38,6 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
+
+# MHI
+obj-y += mhi/
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000000..2344d560b144
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules are in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ static struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
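Note: the probe above reads the Integrator/AP system controller's SC_DEC register and treats bits 4-7 as "logic module fitted in slot 0-3"; each occupied slot is then populated from the child node whose address equals 0xc0000000 plus the slot's 0x10000000 stride. Below is a minimal standalone C sketch of that decode, handy for checking a raw SC_DEC value by hand; the register value passed in main() is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define INTEGRATOR_AP_EXP_BASE   0xc0000000u
#define INTEGRATOR_AP_EXP_STRIDE 0x10000000u

/* Print which logic-module slots a raw SC_DEC value reports as occupied. */
static void decode_sc_dec(uint32_t sc_dec)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (!(sc_dec & (1u << (4 + i))))
			continue;
		printf("slot %d populated, chipselect base 0x%08x\n",
		       i, INTEGRATOR_AP_EXP_BASE + i * INTEGRATOR_AP_EXP_STRIDE);
	}
}

int main(void)
{
	decode_sc_dec(0x30);	/* bits 4 and 5 set: modules in slots 0 and 1 */
	return 0;
}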
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ec1004c858b8..b0c3704777e9 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2021 Broadcom
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
@@ -30,8 +31,22 @@
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+#define ARB_BP_CAP_CLEAR (1 << 0)
+#define ARB_BP_CAP_STATUS_PROT_SHIFT 14
+#define ARB_BP_CAP_STATUS_TYPE (1 << 13)
+#define ARB_BP_CAP_STATUS_RSP_SHIFT 10
+#define ARB_BP_CAP_STATUS_MASK GENMASK(1, 0)
+#define ARB_BP_CAP_STATUS_BS_SHIFT 2
+#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
+#define ARB_BP_CAP_STATUS_VALID (1 << 0)
+
enum {
ARB_TIMER,
+ ARB_BP_CAP_CLR,
+ ARB_BP_CAP_HI_ADDR,
+ ARB_BP_CAP_ADDR,
+ ARB_BP_CAP_STATUS,
+ ARB_BP_CAP_MASTER,
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
@@ -41,6 +56,11 @@ enum {
static const int gisb_offsets_bcm7038[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = -1,
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
@@ -50,6 +70,11 @@ static const int gisb_offsets_bcm7038[] = {
static const int gisb_offsets_bcm7278[] = {
[ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x01c,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x220,
+ [ARB_BP_CAP_STATUS] = 0x230,
+ [ARB_BP_CAP_MASTER] = 0x234,
[ARB_ERR_CAP_CLR] = 0x7f8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x7e0,
@@ -59,6 +84,11 @@ static const int gisb_offsets_bcm7278[] = {
static const int gisb_offsets_bcm7400[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = 0x0c4,
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
@@ -68,6 +98,11 @@ static const int gisb_offsets_bcm7400[] = {
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x158,
+ [ARB_BP_CAP_STATUS] = 0x160,
+ [ARB_BP_CAP_MASTER] = 0x164,
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
@@ -77,6 +112,11 @@ static const int gisb_offsets_bcm7435[] = {
static const int gisb_offsets_bcm7445[] = {
[ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x010,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x1d8,
+ [ARB_BP_CAP_STATUS] = 0x1e0,
+ [ARB_BP_CAP_MASTER] = 0x1e4,
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
@@ -125,6 +165,16 @@ static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
return value;
}
+static u64 gisb_read_bp_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_BP_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_BP_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
@@ -210,8 +260,8 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
m_name = m_fmt;
}
- pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
- __func__, reason, arb_addr,
+ pr_crit("GISB: %s at 0x%llx [%c %s], core: %s\n",
+ reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
m_name);
@@ -259,6 +309,41 @@ static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t brcmstb_gisb_bp_handler(int irq, void *dev_id)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_id;
+ const char *m_name;
+ u32 bp_status;
+ u64 arb_addr;
+ u32 master;
+ char m_fmt[11];
+
+ bp_status = gisb_read(gdev, ARB_BP_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(bp_status & ARB_BP_CAP_STATUS_VALID))
+ return IRQ_HANDLED;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_bp_address(gdev);
+ master = gisb_read(gdev, ARB_BP_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: breakpoint at 0x%llx [%c], core: %s\n",
+ arb_addr, bp_status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R',
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return IRQ_HANDLED;
+}
+
/*
* Dump out gisb errors on die or panic.
*/
@@ -317,13 +402,14 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
struct brcmstb_gisb_arb_device *gdev;
const struct of_device_id *of_id;
struct resource *r;
- int err, timeout_irq, tea_irq;
+ int err, timeout_irq, tea_irq, bp_irq;
unsigned int num_masters, j = 0;
int i, first, last;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
timeout_irq = platform_get_irq(pdev, 0);
tea_irq = platform_get_irq(pdev, 1);
+ bp_irq = platform_get_irq(pdev, 2);
gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
if (!gdev)
@@ -356,6 +442,15 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ /* Interrupt is optional */
+ if (bp_irq > 0) {
+ err = devm_request_irq(&pdev->dev, bp_irq,
+ brcmstb_gisb_bp_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+ }
+
/* If we do not have a valid mask, assume all masters are enabled */
if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
&gdev->valid_mask))
@@ -390,7 +485,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
#ifdef CONFIG_MIPS
- board_be_handler = brcmstb_bus_error_handler;
+ mips_set_be_handler(brcmstb_bus_error_handler);
#endif
if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
@@ -451,3 +546,7 @@ static int __init brcm_gisb_driver_init(void)
}
module_init(brcm_gisb_driver_init);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
+MODULE_LICENSE("GPL v2");
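Note: the new breakpoint capture path mirrors the existing error-capture one: gisb_read_bp_address() combines the low and high capture registers into a 64-bit address, and the handler reports the access only when the VALID status bit is set, using the WRITE bit to tell reads from writes. A small sketch of that decode step on raw register words, assuming nothing beyond the bit definitions added above (the sample values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
#define ARB_BP_CAP_STATUS_VALID (1 << 0)

/* Reconstruct the captured breakpoint address and direction from raw words. */
static void decode_bp_capture(uint32_t status, uint32_t addr_lo, uint32_t addr_hi)
{
	uint64_t addr = (uint64_t)addr_hi << 32 | addr_lo;

	if (!(status & ARB_BP_CAP_STATUS_VALID)) {
		printf("no valid breakpoint capture\n");
		return;
	}
	printf("GISB: breakpoint at 0x%llx [%c]\n",
	       (unsigned long long)addr,
	       status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R');
}

int main(void)
{
	decode_bp_capture(0x3, 0x10407000, 0x0);	/* illustrative raw values */
	return 0;
}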
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000000..63b1b4a76671
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/clk.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+
+}
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
+ "Couldn't get reset control line\n");
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
+ "Couldn't get APB clock descriptor\n");
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+ /* Unmask IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+ * Either dummy read from the unmapped address in the APB IO area
+ * or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
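Note: the timeout sysfs attribute relies on the two helpers near the top of the file, which convert between the raw cycle count programmed into APB_EHB_TIMEOUT and microseconds via the pclk rate (timeout_us = n * USEC_PER_SEC / rate, and the inverse for the store path). A standalone sketch of that arithmetic, assuming a purely illustrative 100 MHz APB clock:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

/* n bus cycles -> microseconds, mirroring bt1_apb_n_to_timeout_us() */
static uint64_t cycles_to_us(uint64_t n, uint64_t rate)
{
	return n * USEC_PER_SEC / rate;
}

/* microseconds -> bus cycles, mirroring bt1_apb_timeout_to_n_us() */
static uint64_t us_to_cycles(uint64_t us, uint64_t rate)
{
	return us * rate / USEC_PER_SEC;
}

int main(void)
{
	uint64_t rate = 100000000;	/* illustrative 100 MHz APB clock */

	/* The minimum hardware timeout of 0x3FF cycles is ~10 us at this rate. */
	printf("0x3FF cycles -> %llu us\n",
	       (unsigned long long)cycles_to_us(0x3FF, rate));
	/* A 1000 us request via the timeout sysfs node maps to 100000 cycles. */
	printf("1000 us -> %llu cycles\n",
	       (unsigned long long)us_to_cycles(1000, rate));
	return 0;
}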
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000000..70e49a6e5374
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->arst),
+ "Couldn't get reset control line\n");
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
+ "Couldn't get AXI Interconnect clock\n");
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+ * Performing unaligned read from the memory will cause the CM2 bus
+ * error while unaligned writing - the AXI bus write error handled
+ * by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
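Note: like the APB driver, this one exposes its state through sysfs: count reports the number of AXI faults seen so far, and inject_error accepts the strings "bus" or "unaligned" to provoke one on demand. A hedged userspace sketch of exercising those attributes; the sysfs directory name is a placeholder, since the real path depends on the board's device-tree node for the AXI EHB:

#include <stdio.h>

/*
 * The directory name below is a placeholder; the real path depends on the
 * device-tree node address of the AXI EHB on a given board.
 */
#define AXI_EHB_SYSFS "/sys/devices/platform/soc/placeholder.axi"

int main(void)
{
	char path[256], buf[32];
	FILE *f;

	/* Provoke an AXI write fault through the "unaligned" injection hook. */
	snprintf(path, sizeof(path), "%s/inject_error", AXI_EHB_SYSFS);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("unaligned", f);
	fclose(f);

	/* Read back how many AXI faults the handler has counted so far. */
	snprintf(path, sizeof(path), "%s/count", AXI_EHB_SYSFS);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("AXI faults so far: %s", buf);
	fclose(f);
	return 0;
}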
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
index c23c77c9b705..b1fd55901c50 100644
--- a/drivers/bus/fsl-mc/Kconfig
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -14,3 +14,10 @@ config FSL_MC_BUS
architecture. The fsl-mc bus driver handles discovery of
DPAA2 objects (which are represented as Linux devices) and
binding objects to drivers.
+
+config FSL_MC_UAPI_SUPPORT
+ bool "Management Complex (MC) userspace support"
+ depends on FSL_MC_BUS
+ help
+ Provides userspace support for interrogating, creating, destroying or
+ configuring DPAA2 objects exported by the Management Complex.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
index 3c518c7e8374..892946245527 100644
--- a/drivers/bus/fsl-mc/Makefile
+++ b/drivers/bus/fsl-mc/Makefile
@@ -15,4 +15,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \
dprc-driver.o \
fsl-mc-allocator.o \
fsl-mc-msi.o \
- dpmcp.o
+ dpmcp.o \
+ obj-api.o
+
+# MC userspace support
+obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index c8b1c3842c1a..5e70f9775a0e 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -3,6 +3,7 @@
* Freescale data path resource container (DPRC) driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -27,7 +28,16 @@ static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
}
static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
@@ -71,9 +81,9 @@ static int __fsl_mc_device_remove(struct device *dev, void *data)
* the MC by removing devices that represent MC objects that have
* been dynamically removed in the physical DPRC.
*/
-static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
- struct fsl_mc_obj_desc *obj_desc_array,
- int num_child_objects_in_mc)
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
{
if (num_child_objects_in_mc != 0) {
/*
@@ -95,6 +105,7 @@ static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
__fsl_mc_device_remove);
}
}
+EXPORT_SYMBOL_GPL(dprc_remove_devices);
static int __fsl_mc_device_match(struct device *dev, void *data)
{
@@ -150,6 +161,27 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
}
}
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
/**
* dprc_add_new_devices - Adds devices to the logical bus for a DPRC
*
@@ -166,30 +198,23 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
- int error;
int i;
+ /* probe the allocable objects first */
for (i = 0; i < num_child_objects_in_mc; i++) {
- struct fsl_mc_device *child_dev;
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- if (strlen(obj_desc->type) == 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
- /*
- * Check if device is already known to Linux:
- */
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
- put_device(&child_dev->dev);
- continue;
- }
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
- &child_dev);
- if (error < 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
}
@@ -197,8 +222,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* dprc_scan_objects - Discover objects in a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- * @total_irq_count: If argument is provided the function populates the
- * total number of IRQs created by objects in the DPRC.
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
*
* Detects objects added and removed from a DPRC and synchronizes the
* state of the Linux bus driver, MC by adding and removing
@@ -212,8 +237,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* populated before they can get allocation requests from probe callbacks
* of the device drivers for the non-allocatable devices.
*/
-static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
- unsigned int *total_irq_count)
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
{
int num_child_objects;
int dprc_get_obj_failures;
@@ -294,22 +319,21 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
* Allocate IRQ's before binding the scanned devices with their
* respective drivers.
*/
- if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+ if (dev_get_msi_domain(&mc_bus_dev->dev)) {
if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
dev_warn(&mc_bus_dev->dev,
"IRQs needed (%u) exceed IRQs preallocated (%u)\n",
irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
}
- error = fsl_mc_populate_irq_pool(mc_bus,
- FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- if (error < 0)
- return error;
+ if (alloc_interrupts && !mc_bus->irq_resources) {
+ error = fsl_mc_populate_irq_pool(mc_bus_dev,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
}
- if (total_irq_count)
- *total_irq_count = irq_count;
-
dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
@@ -326,14 +350,16 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
* dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- *
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
* Scans the physical DPRC and synchronizes the state of the Linux
* bus driver with the actual state of the MC by adding and removing
* devices as appropriate.
*/
-static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
{
- int error;
+ int error = 0;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
fsl_mc_init_all_resource_pools(mc_bus_dev);
@@ -342,20 +368,17 @@ static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
* Discover objects in the DPRC:
*/
mutex_lock(&mc_bus->scan_mutex);
- error = dprc_scan_objects(mc_bus_dev, NULL);
+ error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
mutex_unlock(&mc_bus->scan_mutex);
- if (error < 0) {
- fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
- return error;
- }
- return 0;
+ return error;
}
+EXPORT_SYMBOL_GPL(dprc_scan_container);
/**
* dprc_irq0_handler - Regular ISR for DPRC interrupt 0
*
- * @irq: IRQ number of the interrupt being handled
+ * @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
@@ -366,7 +389,7 @@ static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
/**
* dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
*
- * @irq: IRQ number of the interrupt being handled
+ * @irq_num: IRQ number of the interrupt being handled
* @arg: Pointer to device structure
*/
static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
@@ -377,7 +400,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
struct fsl_mc_io *mc_io = mc_dev->mc_io;
- struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+ int irq = mc_dev->irqs[0]->virq;
dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
irq_num, smp_processor_id());
@@ -386,7 +409,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
mutex_lock(&mc_bus->scan_mutex);
- if (!msi_desc || msi_desc->irq != (u32)irq_num)
+ if (irq != (u32)irq_num)
goto out;
status = 0;
@@ -411,9 +434,8 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
DPRC_IRQ_EVENT_OBJ_DESTROYED |
DPRC_IRQ_EVENT_OBJ_CREATED)) {
- unsigned int irq_count;
- error = dprc_scan_objects(mc_dev, &irq_count);
+ error = dprc_scan_objects(mc_dev, true);
if (error < 0) {
/*
* If the error is -ENXIO, we ignore it, as it indicates
@@ -428,12 +450,6 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
goto out;
}
-
- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
- dev_warn(dev,
- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- }
}
out:
@@ -444,8 +460,9 @@ out:
/*
* Disable and clear interrupt for a given DPRC object
*/
-static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+int disable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
struct fsl_mc_io *mc_io = mc_dev->mc_io;
@@ -482,9 +499,18 @@ static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 0;
+
return 0;
}
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ return mc_bus->irq_enabled;
+}
+
static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
{
int error;
@@ -495,7 +521,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
* function that programs the MSI physically in the device
*/
error = devm_request_threaded_irq(&mc_dev->dev,
- irq->msi_desc->irq,
+ irq->virq,
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -511,8 +537,9 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
return 0;
}
-static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+int enable_dprc_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
int error;
/*
@@ -540,6 +567,8 @@ static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
return error;
}
+ mc_bus->irq_enabled = 1;
+
return 0;
}
@@ -574,24 +603,25 @@ error_free_irqs:
}
/**
- * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ * dprc_setup - opens and creates a mc_io for DPRC
*
* @mc_dev: Pointer to fsl-mc device representing a DPRC
*
* It opens the physical DPRC in the MC.
- * It scans the DPRC to discover the MC objects contained in it.
- * It creates the interrupt pool for the MC bus associated with the DPRC.
- * It configures the interrupts for the DPRC device itself.
+ * It configures the DPRC portal used to communicate with MC
*/
-static int dprc_probe(struct fsl_mc_device *mc_dev)
+
+int dprc_setup(struct fsl_mc_device *mc_dev)
{
- int error;
- size_t region_size;
struct device *parent_dev = mc_dev->dev.parent;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct irq_domain *mc_msi_domain;
bool mc_io_created = false;
bool msi_domain_set = false;
+ bool uapi_created = false;
u16 major_ver, minor_ver;
+ size_t region_size;
+ int error;
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
@@ -621,31 +651,20 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
return error;
mc_io_created = true;
-
- /*
- * Inherit parent MSI domain:
- */
- dev_set_msi_domain(&mc_dev->dev,
- dev_get_msi_domain(parent_dev));
- msi_domain_set = true;
} else {
- /*
- * This is a root DPRC
- */
- struct irq_domain *mc_msi_domain;
-
- if (dev_is_fsl_mc(parent_dev))
- return -EINVAL;
+ error = fsl_mc_uapi_create_device_file(mc_bus);
+ if (error < 0)
+ return -EPROBE_DEFER;
+ uapi_created = true;
+ }
- error = fsl_mc_find_msi_domain(parent_dev,
- &mc_msi_domain);
- if (error < 0) {
- dev_warn(&mc_dev->dev,
- "WARNING: MC bus without interrupt support\n");
- } else {
- dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
- msi_domain_set = true;
- }
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
}
error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
@@ -672,9 +691,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_open;
}
- if (major_ver < DPRC_MIN_VER_MAJOR ||
- (major_ver == DPRC_MIN_VER_MAJOR &&
- minor_ver < DPRC_MIN_VER_MINOR)) {
+ if (major_ver < DPRC_MIN_VER_MAJOR) {
dev_err(&mc_dev->dev,
"ERROR: DPRC version %d.%d not supported\n",
major_ver, minor_ver);
@@ -682,37 +699,66 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_open;
}
- mutex_init(&mc_bus->scan_mutex);
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ if (uapi_created)
+ fsl_mc_uapi_remove_device_file(mc_bus);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_setup);
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = dprc_setup(mc_dev);
+ if (error < 0)
+ return error;
/*
* Discover MC objects in DPRC object:
*/
- error = dprc_scan_container(mc_dev);
+ error = dprc_scan_container(mc_dev, true);
if (error < 0)
- goto error_cleanup_open;
+ goto dprc_cleanup;
/*
* Configure interrupt for the DPRC object associated with this MC bus:
*/
error = dprc_setup_irq(mc_dev);
if (error < 0)
- goto error_cleanup_open;
+ goto scan_cleanup;
dev_info(&mc_dev->dev, "DPRC device bound to driver");
return 0;
-error_cleanup_open:
- (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-
-error_cleanup_msi_domain:
- if (msi_domain_set)
- dev_set_msi_domain(&mc_dev->dev, NULL);
-
- if (mc_io_created) {
- fsl_destroy_mc_io(mc_dev->mc_io);
- mc_dev->mc_io = NULL;
- }
-
+scan_cleanup:
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+dprc_cleanup:
+ dprc_cleanup(mc_dev);
return error;
}
@@ -725,46 +771,46 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
(void)disable_dprc_irq(mc_dev);
- devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+ devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
fsl_mc_free_irqs(mc_dev);
}
/**
- * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ * dprc_cleanup - function that cleanups a DPRC
*
* @mc_dev: Pointer to fsl-mc device representing the DPRC
*
- * It removes the DPRC's child objects from Linux (not from the MC) and
- * closes the DPRC device in the MC.
- * It tears down the interrupts that were configured for the DPRC device.
+ * It closes the DPRC device in the MC.
* It destroys the interrupt pool associated with this MC bus.
*/
-static int dprc_remove(struct fsl_mc_device *mc_dev)
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev)
{
- int error;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+ /* this function should be called only for DPRCs, it
+ * is an error to call it for regular objects
+ */
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
- if (!mc_dev->mc_io)
- return -EINVAL;
-
- if (!mc_bus->irq_resources)
- return -EINVAL;
-
- if (dev_get_msi_domain(&mc_dev->dev))
- dprc_teardown_irq(mc_dev);
-
- device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
if (dev_get_msi_domain(&mc_dev->dev)) {
- fsl_mc_cleanup_irq_pool(mc_bus);
+ fsl_mc_cleanup_irq_pool(mc_dev);
dev_set_msi_domain(&mc_dev->dev, NULL);
}
fsl_mc_cleanup_all_resource_pools(mc_dev);
+ /* if this step fails we cannot go further with cleanup as there is no way of
+ * communicating with the firmware
+ */
+ if (!mc_dev->mc_io) {
+ dev_err(&mc_dev->dev, "mc_io is NULL, tear down cannot be performed in firmware\n");
+ return -EINVAL;
+ }
+
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
@@ -772,8 +818,41 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
fsl_destroy_mc_io(mc_dev->mc_io);
mc_dev->mc_io = NULL;
+ } else {
+ fsl_mc_uapi_remove_device_file(mc_bus);
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_cleanup);
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static int dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ dprc_cleanup(mc_dev);
+
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
return 0;
}
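Note: the refactoring above splits the old dprc_probe() into exported building blocks — dprc_setup(), dprc_scan_container() with its new alloc_interrupts flag, and dprc_cleanup() — so another in-kernel user can drive a DPRC without going through the whole probe path. The following is only a sketch of the call sequence such a hypothetical user might follow, based on the signatures exported in this patch; the surrounding module context and the header providing the prototypes are assumptions, not shown here.

#include <linux/fsl/mc.h>	/* assumed location of the exported prototypes */

/* Hypothetical consumer of the newly exported DPRC helpers. */
static int example_bind_dprc(struct fsl_mc_device *mc_dev)
{
	int err;

	/* Open the DPRC and create its mc_io portal. */
	err = dprc_setup(mc_dev);
	if (err)
		return err;

	/* Discover the child objects, deferring the IRQ pool allocation. */
	err = dprc_scan_container(mc_dev, false);
	if (err)
		dprc_cleanup(mc_dev);

	return err;
}

static void example_unbind_dprc(struct fsl_mc_device *mc_dev)
{
	/* Close the DPRC and release its portal and MSI domain. */
	dprc_cleanup(mc_dev);
}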
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 602f030d84eb..d129338b8bc0 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
*/
#include <linux/kernel.h>
@@ -8,6 +9,13 @@
#include "fsl-mc-private.h"
+/*
+ * cache the DPRC version to reduce the number of commands
+ * towards the mc firmware
+ */
+static u16 dprc_major_ver;
+static u16 dprc_minor_ver;
+
/**
* dprc_open() - Open DPRC object for use
* @mc_io: Pointer to MC portal's I/O object
@@ -73,6 +81,77 @@ int dprc_close(struct fsl_mc_io *mc_io,
EXPORT_SYMBOL_GPL(dprc_close);
/**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ * @options: 32 bit options:
+ * - 0 (no bits set) - all the objects inside the container are
+ * reset. The child containers are entered recursively and the
+ * objects reset. All the objects (including the child containers)
+ * are closed.
+ * - bit 0 set - all the objects inside the container are reset.
+ * However the child containers are not entered recursively.
+ * This option is supported for API versions >= 6.5
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resources container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All objects handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
+ u32 cmdid = DPRC_CMDID_RESET_CONT;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ /*
+ * MC API 6.5 introduced a new field in the command used to pass
+ * some flags.
+ * Bit 0 indicates that the child containers are not recursively reset.
+ */
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
+ cmdid = DPRC_CMDID_RESET_CONT_V2;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(options);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_reset_container);
+
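For illustration, a DPRC driver that already holds an open handle on the parent container could use the new call roughly as below. This is a minimal sketch, not part of the patch; the non-recursive option (bit 0) only takes effect on MC API 6.5 or later, as noted above.

static int example_reset_child(struct fsl_mc_io *mc_io, u16 dprc_handle,
			       int child_container_id)
{
	/* bit 0 set: reset the child's objects without entering nested containers */
	return dprc_reset_container(mc_io, 0, dprc_handle,
				    child_container_id, BIT(0));
}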
+/**
* dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -255,7 +334,7 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
- * @attributes Returned container attributes
+ * @attr: Returned container attributes
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -281,7 +360,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
attr->container_id = le32_to_cpu(rsp_params->container_id);
- attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->icid = le32_to_cpu(rsp_params->icid);
attr->options = le32_to_cpu(rsp_params->options);
attr->portal_id = le32_to_cpu(rsp_params->portal_id);
@@ -425,7 +504,7 @@ EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRC object
- * @obj_type; Object type as returned in dprc_get_obj()
+ * @obj_type: Object type as returned in dprc_get_obj()
* @obj_id: Unique object instance as returned in dprc_get_obj()
* @region_index: The specific region to query
* @region_desc: Returns the requested region descriptor
@@ -443,30 +522,44 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj_region *cmd_params;
struct dprc_rsp_get_obj_region *rsp_params;
- u16 major_ver, minor_ver;
int err;
- /* prepare command */
- err = dprc_get_api_version(mc_io, 0,
- &major_ver,
- &minor_ver);
- if (err)
- return err;
-
- /**
- * MC API version 6.3 introduced a new field to the region
- * descriptor: base_address. If the older API is in use then the base
- * address is set to zero to indicate it needs to be obtained elsewhere
- * (typically the device tree).
- */
- if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
- cmd.header =
- mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
- cmd_flags, token);
- else
- cmd.header =
- mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
- cmd_flags, token);
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
+ /*
+ * MC API version 6.6 changed the size of the MC portals and software
+ * portals to 64K (as implemented by hardware). If an older API is in
+ * use, the reported size is smaller (64 bytes for MC portals and 4K for
+ * software portals).
+ */
+
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
+ cmd_flags, token);
+
+ } else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If the older API is in use then the base
+ * address is set to zero to indicate it needs to be obtained elsewhere
+ * (typically the device tree).
+ */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ } else {
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ }
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
@@ -483,7 +576,9 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
region_desc->size = le32_to_cpu(rsp_params->size);
- if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+ region_desc->type = rsp_params->type;
+ region_desc->flags = le32_to_cpu(rsp_params->flags);
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
else
region_desc->base_address = 0;
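The same cached-version comparison now appears here and in dprc_reset_container(); it amounts to the small helper sketched below (the helper name is hypothetical, and it assumes dprc_get_api_version() has already filled the cached values):

/* true if the cached DPRC API version is at least major.minor */
static bool dprc_api_at_least(u16 major, u16 minor)
{
	return dprc_major_ver > major ||
	       (dprc_major_ver == major && dprc_minor_ver >= minor);
}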
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index cc7bb900f524..dced427ca8ba 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
* @mc_dev: fsl-mc device which is used in conjunction with the
* allocated object
* @pool_type: pool type
- * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * @new_mc_adev: pointer to area where the pointer to the allocated device
* is to be returned
*
* Allocatable objects are always used in conjunction with some functional
@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
goto error;
mc_adev = resource->data;
- if (!mc_adev)
+ if (!mc_adev) {
+ error = -EINVAL;
goto error;
+ }
mc_adev->consumer_link = device_link_add(&mc_dev->dev,
&mc_adev->dev,
@@ -344,18 +346,21 @@ EXPORT_SYMBOL_GPL(fsl_mc_object_free);
* Initialize the interrupt pool associated with an fsl-mc bus.
* It allocates a block of IRQs from the GIC-ITS.
*/
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
unsigned int irq_count)
{
unsigned int i;
- struct msi_desc *msi_desc;
struct fsl_mc_device_irq *irq_resources;
struct fsl_mc_device_irq *mc_dev_irq;
int error;
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ /* do nothing if the IRQ pool is already populated */
+ if (mc_bus->irq_resources)
+ return 0;
+
if (irq_count == 0 ||
irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
return -EINVAL;
@@ -382,16 +387,12 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
mc_dev_irq->resource.type = res_pool->type;
mc_dev_irq->resource.data = mc_dev_irq;
mc_dev_irq->resource.parent_pool = res_pool;
+ mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+ mc_dev_irq->resource.id = mc_dev_irq->virq;
INIT_LIST_HEAD(&mc_dev_irq->resource.node);
list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
}
- for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
- mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
- mc_dev_irq->msi_desc = msi_desc;
- mc_dev_irq->resource.id = msi_desc->irq;
- }
-
res_pool->max_count = irq_count;
res_pool->free_count = irq_count;
mc_bus->irq_resources = irq_resources;
@@ -403,13 +404,13 @@ cleanup_msi_irqs:
}
EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
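With this change the IRQ pool helpers take the DPRC's fsl_mc_device directly instead of the fsl_mc_bus, so the setup and teardown paths pair up roughly as in this sketch (function names are hypothetical):

static int example_setup_dprc_irqs(struct fsl_mc_device *mc_bus_dev)
{
	int error;

	/* a no-op if the pool was already populated for this bus */
	error = fsl_mc_populate_irq_pool(mc_bus_dev,
					 FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
	if (error)
		return error;

	/* ...configure and request the DPRC IRQ here... */
	return 0;
}

static void example_teardown_dprc_irqs(struct fsl_mc_device *mc_bus_dev)
{
	/* ...free the DPRC IRQ here... */
	fsl_mc_cleanup_irq_pool(mc_bus_dev);
}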
-/**
+/*
 * Tear down the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
{
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
@@ -430,7 +431,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
}
EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
-/**
+/*
* Allocate the IRQs required by a given fsl-mc device.
*/
int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
@@ -572,7 +573,7 @@ void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
}
-/**
+/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
*/
@@ -604,7 +605,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
return 0;
}
-/**
+/*
* fsl_mc_allocator_remove - callback invoked when an allocatable device is
* being removed from the system
*/
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c78d10ea641f..6143dbf31f31 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -3,6 +3,7 @@
* Freescale Management Complex (MC) bus driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -18,24 +19,31 @@
#include <linux/bitops.h>
#include <linux/msi.h>
#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
#include "fsl-mc-private.h"
-/**
+/*
* Default DMA mask for devices on a fsl-mc bus
*/
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+static struct fsl_mc_version mc_version;
+
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
* @num_translation_ranges: number of entries in addr_translation_ranges
* @translation_ranges: array of bus to system address translation ranges
+ * @fsl_mc_regs: base address of register bank
*/
struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
+ void __iomem *fsl_mc_regs;
};
/**
@@ -54,19 +62,15 @@ struct fsl_mc_addr_translation_range {
phys_addr_t start_phys_addr;
};
-/**
- * struct mc_version
- * @major: Major version number: incremented on API compatibility changes
- * @minor: Minor version number: incremented on API additions (that are
- * backward compatible); reset when major version is incremented
- * @revision: Internal revision number: incremented on implementation changes
- * and/or bug fixes that have no impact on API
- */
-struct mc_version {
- u32 major;
- u32 minor;
- u32 revision;
-};
+#define FSL_MC_GCR1 0x0
+#define GCR1_P1_STOP BIT(31)
+#define GCR1_P2_STOP BIT(30)
+
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
+static phys_addr_t mc_portal_base_phys_addr;
/**
* fsl_mc_bus_match - device to driver matching callback
@@ -83,6 +87,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
+ /* When driver_override is set, only bind to the matching driver */
+ if (mc_dev->driver_override) {
+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
+ }
+
if (!mc_drv->match_id_table)
goto out;
@@ -112,7 +122,7 @@ out:
return found;
}
-/**
+/*
* fsl_mc_bus_uevent - callback invoked when a device is added
*/
static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -130,11 +140,34 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ u32 input_id = mc_dev->icid;
+ int ret;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
- return of_dma_configure(dev, dma_dev->of_node, 0);
+ if (dev_of_node(dma_dev))
+ ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+ else
+ ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+ if (!ret && !mc_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+
+ if (!mc_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
}
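A child driver that manages its own IOMMU domain can opt out of the default-domain claim done in fsl_mc_dma_configure() by setting driver_managed_dma; a minimal sketch with placeholder names (example_probe/example_remove are not real functions):

static const struct fsl_mc_device_id example_match_id_table[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpdmai" },
	{ }
};

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example-fsl-mc-driver",
	},
	.match_id_table = example_match_id_table,
	.probe = example_probe,		/* placeholder */
	.remove = example_remove,	/* placeholder */
	/* this driver attaches its own domain, skip the default-domain claim */
	.driver_managed_dma = true,
};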
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -147,19 +180,143 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int ret;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *fsl_mc_dev_attrs[] = {
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(fsl_mc_dev);
+static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ struct fsl_mc_bus *root_mc_bus;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+ dprc_scan_objects(root_mc_dev, false);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+
+exit:
+ return 0;
+}
+
+static ssize_t rescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ unsigned long val;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ enable_dprc_irq(root_mc_dev);
+ else
+ disable_dprc_irq(root_mc_dev);
+
+exit:
+ return 0;
+}
+
+static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
+exit:
+ return 0;
+}
+
+static ssize_t autorescan_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
+
+ return count;
+}
+
+static ssize_t autorescan_show(struct bus_type *bus, char *buf)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
+ return strlen(buf);
+}
+
+static BUS_ATTR_RW(autorescan);
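From user space the two new attributes are ordinary sysfs files under /sys/bus/fsl-mc/; the hedged sketch below triggers a one-shot rescan and then disables the interrupt-driven autorescan (paths assume the usual sysfs mount point):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n == (ssize_t)strlen(val) ? 0 : -1;
}

int main(void)
{
	/* scan the root DPRC once for newly created objects */
	if (write_sysfs("/sys/bus/fsl-mc/rescan", "1"))
		return 1;
	/* stop the DPRC IRQ from triggering rescans automatically */
	return write_sysfs("/sys/bus/fsl-mc/autorescan", "0") ? 1 : 0;
}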
+
+static struct attribute *fsl_mc_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ &bus_attr_autorescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_bus);
+
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.dma_configure = fsl_mc_dma_configure,
+ .dma_cleanup = fsl_mc_dma_cleanup,
.dev_groups = fsl_mc_dev_groups,
+ .bus_groups = fsl_mc_bus_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -213,6 +370,36 @@ struct device_type fsl_mc_bus_dpseci_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
+struct device_type fsl_mc_bus_dpdbg_type = {
+ .name = "fsl_mc_bus_dpdbg"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -229,6 +416,12 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
{ &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { &fsl_mc_bus_dpdbg_type, "dpdbg" },
{ NULL, NULL }
};
int i;
@@ -281,7 +474,7 @@ static void fsl_mc_driver_shutdown(struct device *dev)
mc_drv->shutdown(mc_dev);
}
-/**
+/*
* __fsl_mc_driver_register - registers a child device driver with the
* MC bus
*
@@ -317,7 +510,7 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
}
EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
-/**
+/*
* fsl_mc_driver_unregister - unregisters a device driver from the
* MC bus
*/
@@ -338,7 +531,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
*/
static int mc_get_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
- struct mc_version *mc_ver_info)
+ struct fsl_mc_version *mc_ver_info)
{
struct fsl_mc_command cmd = { 0 };
struct dpmng_rsp_get_version *rsp_params;
@@ -364,10 +557,24 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
}
/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
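A consumer can use the exported helper to gate features on the firmware version; a minimal sketch, with the threshold arguments chosen purely for illustration:

static bool example_mc_fw_at_least(u32 major, u32 minor)
{
	struct fsl_mc_version *ver = fsl_mc_get_version();

	/* NULL until the root DPRC has been probed */
	if (!ver)
		return false;

	return ver->major > major ||
	       (ver->major == major && ver->minor >= minor);
}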
+
+/*
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
-static void fsl_mc_get_root_dprc(struct device *dev,
- struct device **root_dprc_dev)
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
{
if (!dev) {
*root_dprc_dev = NULL;
@@ -408,7 +615,7 @@ common_cleanup:
}
static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
+ int container_id, u32 *icid)
{
struct dprc_attributes attr;
int error;
@@ -502,14 +709,30 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
* If base address is in the region_desc use it otherwise
* revert to old mechanism
*/
- if (region_desc.base_address)
+ if (region_desc.base_address) {
regions[i].start = region_desc.base_address +
region_desc.base_offset;
- else
+ } else {
error = translate_mc_addr(mc_dev, mc_region_type,
region_desc.base_offset,
&regions[i].start);
+ /*
+ * Some versions of the MC firmware wrongly report
+ * 0 for register base address of the DPMCP associated
+ * with child DPRC objects thus rendering them unusable.
+ * This is particularly troublesome in ACPI boot
+ * scenarios where the legacy way of extracting this
+ * base address from the device tree does not apply.
+ * Given that DPMCPs share the same base address,
+ * work around this by using the base address extracted
+ * from the root DPRC container.
+ */
+ if (is_fsl_mc_bus_dprc(mc_dev) &&
+ regions[i].start == region_desc.base_offset)
+ regions[i].start += mc_portal_base_phys_addr;
+ }
+
if (error < 0) {
dev_err(parent_dev,
"Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
@@ -520,11 +743,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
- regions[i].flags = IORESOURCE_IO;
- if (region_desc.flags & DPRC_REGION_CACHEABLE)
- regions[i].flags |= IORESOURCE_CACHEABLE;
- if (region_desc.flags & DPRC_REGION_SHAREABLE)
- regions[i].flags |= IORESOURCE_MEM;
+ regions[i].flags = region_desc.flags & IORESOURCE_BITS;
+ regions[i].flags |= IORESOURCE_MEM;
}
mc_dev->regions = regions;
@@ -535,7 +755,7 @@ error_cleanup_regions:
return error;
}
-/**
+/*
* fsl_mc_is_root_dprc - function to check if a given device is a root dprc
*/
bool fsl_mc_is_root_dprc(struct device *dev)
@@ -560,7 +780,7 @@ static void fsl_mc_device_release(struct device *dev)
kfree(mc_dev);
}
-/**
+/*
* Add a newly discovered fsl-mc device to be visible in Linux
*/
int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
@@ -586,6 +806,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
if (!mc_bus)
return -ENOMEM;
+ mutex_init(&mc_bus->scan_mutex);
mc_dev = &mc_bus->mc_dev;
} else {
/*
@@ -696,6 +917,8 @@ error_cleanup_dev:
}
EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+static struct notifier_block fsl_mc_nb;
+
/**
* fsl_mc_device_remove - Remove an fsl-mc device from being visible to
* Linux
@@ -704,6 +927,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_add);
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
+ kfree(mc_dev->driver_override);
+ mc_dev->driver_override = NULL;
+
/*
* The device-specific remove callback will get invoked by device_del()
*/
@@ -712,7 +938,8 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id)
{
struct fsl_mc_device *mc_bus_dev, *endpoint;
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
@@ -723,6 +950,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
strcpy(endpoint1.type, mc_dev->obj_desc.type);
endpoint1.id = mc_dev->obj_desc.id;
+ endpoint1.if_id = if_id;
err = dprc_get_connection(mc_bus_dev->mc_io, 0,
mc_bus_dev->mc_handle,
@@ -741,6 +969,33 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * The firmware has already confirmed that the device has an endpoint,
+ * so a NULL result here means the endpoint simply has not been
+ * discovered by the fsl-mc bus yet. Force a rescan of the devices in
+ * this container and retry the lookup.
+ */
+ if (!endpoint) {
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
+ }
+
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * This means that the endpoint might reside in a different isolation
+ * context (DPRC/container). Not much to do, so return a permission
+ * error.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPERM);
+
return endpoint;
}
EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
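Callers now pass the interface id and must handle an error pointer, since the endpoint may live in a different container; a hedged sketch of the calling pattern (the helper name is made up):

static struct fsl_mc_device *example_get_peer(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_device *peer;

	/* interface 0; switch-like objects pass the real port index here */
	peer = fsl_mc_get_endpoint(mc_dev, 0);
	if (IS_ERR(peer)) {
		/* -EPERM means the endpoint sits in another DPRC */
		dev_dbg(&mc_dev->dev, "no usable endpoint: %ld\n",
			PTR_ERR(peer));
		return NULL;
	}

	return peer;
}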
@@ -848,7 +1103,7 @@ static int get_mc_addr_translation_ranges(struct device *dev,
return 0;
}
-/**
+/*
* fsl_mc_bus_probe - callback invoked when the root MC bus is being
* added
*/
@@ -861,9 +1116,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
struct fsl_mc_io *mc_io = NULL;
int container_id;
phys_addr_t mc_portal_phys_addr;
- u32 mc_portal_size;
- struct mc_version mc_version;
- struct resource res;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
@@ -871,19 +1125,56 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (plat_res) {
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+ }
+
+ if (mc->fsl_mc_regs) {
+ if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bit, places the result in bit
+ * 14 of the StreamID and ORs in the ICID. Calculate it
+ * accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ BIT(14) : 0);
+ error = acpi_dma_configure_id(&pdev->dev,
+ DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error == -EPROBE_DEFER)
+ return error;
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to configure dma: %d.\n",
+ error);
+ }
+
+ /*
+ * Some bootloaders pause the MC firmware before booting the
+ * kernel so that the MC does not cause faults as soon as the
+ * SMMU probes, since no SMMU configuration is in place for the
+ * MC at that point.
+ * By now the MC has all of its SMMU setup done, so make sure it
+ * is resumed.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) &
+ (~(GCR1_P1_STOP | GCR1_P2_STOP)),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
+
/*
* Get physical address of MC portal for the root DPRC:
*/
- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
- if (error < 0) {
- dev_err(&pdev->dev,
- "of_address_to_resource() failed for %pOF\n",
- pdev->dev.of_node);
- return error;
- }
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
+ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
- mc_portal_phys_addr = res.start;
- mc_portal_size = resource_size(&res);
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
mc_portal_size, NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
@@ -900,11 +1191,13 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- error = get_mc_addr_translation_ranges(&pdev->dev,
- &mc->translation_ranges,
- &mc->num_translation_ranges);
- if (error < 0)
- goto error_cleanup_mc_io;
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
@@ -931,6 +1224,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
return 0;
error_cleanup_mc_io:
@@ -938,25 +1232,42 @@ error_cleanup_mc_io:
return error;
}
-/**
+/*
* fsl_mc_bus_remove - callback invoked when the root MC bus is being
* removed
*/
static int fsl_mc_bus_remove(struct platform_device *pdev)
{
struct fsl_mc *mc = platform_get_drvdata(pdev);
+ struct fsl_mc_io *mc_io;
if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
return -EINVAL;
+ mc_io = mc->root_mc_bus_dev->mc_io;
fsl_mc_device_remove(mc->root_mc_bus_dev);
+ fsl_destroy_mc_io(mc_io);
- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
- mc->root_mc_bus_dev->mc_io = NULL;
+ bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
+
+ if (mc->fsl_mc_regs) {
+ /*
+ * Pause the MC firmware so that it doesn't crash in certain
+ * scenarios, such as kexec.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) |
+ (GCR1_P1_STOP | GCR1_P2_STOP),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
return 0;
}
+static void fsl_mc_bus_shutdown(struct platform_device *pdev)
+{
+ fsl_mc_bus_remove(pdev);
+}
+
static const struct of_device_id fsl_mc_bus_match_table[] = {
{.compatible = "fsl,qoriq-mc",},
{},
@@ -964,14 +1275,60 @@ static const struct of_device_id fsl_mc_bus_match_table[] = {
MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
static struct platform_driver fsl_mc_bus_driver = {
.driver = {
.name = "fsl_mc_bus",
.pm = NULL,
.of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
.remove = fsl_mc_bus_remove,
+ .shutdown = fsl_mc_bus_shutdown,
+};
+
+static int fsl_mc_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct resource *res;
+ void __iomem *fsl_mc_regs;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ if (!of_match_device(fsl_mc_bus_match_table, dev) &&
+ !acpi_match_device(fsl_mc_bus_acpi_match_table, dev))
+ return 0;
+
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
+ if (!res)
+ return 0;
+
+ fsl_mc_regs = ioremap(res->start, resource_size(res));
+ if (!fsl_mc_regs)
+ return 0;
+
+ /*
+ * Make sure that the MC firmware is paused before its IOMMU setup is
+ * done; otherwise the firmware will crash right after the SMMU gets
+ * probed and enabled.
+ */
+ writel(readl(fsl_mc_regs + FSL_MC_GCR1) | (GCR1_P1_STOP | GCR1_P2_STOP),
+ fsl_mc_regs + FSL_MC_GCR1);
+ iounmap(fsl_mc_regs);
+
+ return 0;
+}
+
+static struct notifier_block fsl_mc_nb = {
+ .notifier_call = fsl_mc_bus_notifier,
};
static int __init fsl_mc_bus_driver_init(void)
@@ -998,7 +1355,7 @@ static int __init fsl_mc_bus_driver_init(void)
if (error < 0)
goto error_cleanup_dprc_driver;
- return 0;
+ return bus_register_notifier(&platform_bus_type, &fsl_mc_nb);
error_cleanup_dprc_driver:
dprc_driver_exit();
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 8b9c66d7c4ff..0cfe859a4ac4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
+#include <linux/acpi_iort.h>
#include "fsl-mc-private.h"
@@ -28,7 +29,7 @@ static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
* Make the base hwirq value for ICID*10000 so it is readable
* as a decimal value in /proc/interrupts.
*/
- return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+ return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
}
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
@@ -57,11 +58,11 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
- struct fsl_mc_device_irq *mc_dev_irq)
+ struct fsl_mc_device_irq *mc_dev_irq,
+ struct msi_desc *msi_desc)
{
int error;
struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
- struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
struct dprc_irq_cfg irq_cfg;
/*
@@ -121,14 +122,14 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_device_irq *mc_dev_irq =
- &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+ &mc_bus->irq_resources[msi_desc->msi_index];
msi_desc->msg = *msg;
/*
* Program the MSI (paddr, value) pair in the device:
*/
- __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
}
static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
@@ -147,7 +148,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/**
* fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
- * @np: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional firmware node of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
@@ -169,6 +170,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
fsl_mc_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
fsl_mc_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -177,80 +179,57 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain)
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
struct irq_domain *msi_domain;
- struct device_node *mc_of_node = mc_platform_dev->of_node;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
- DOMAIN_BUS_FSL_MC_MSI);
- if (!msi_domain) {
- pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
- mc_of_node);
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
- return -ENOENT;
- }
-
- *mc_msi_domain = msi_domain;
- return 0;
-}
-
-static void fsl_mc_msi_free_descs(struct device *dev)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
-}
-
-static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
-{
- unsigned int i;
- int error;
- struct msi_desc *msi_desc;
-
- for (i = 0; i < irq_count; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- dev_err(dev, "Failed to allocate msi entry\n");
- error = -ENOMEM;
- goto cleanup_msi_descs;
- }
+ /*
+ * If the msi-map property is missing, assume that all the
+ * child containers inherit the domain from the parent.
+ */
+ if (!msi_domain)
- msi_desc->fsl_mc.msi_index = i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ msi_domain = of_msi_get_domain(bus_dev,
+ bus_dev->of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ } else {
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
}
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
- return error;
+ return msi_domain;
}
-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
- unsigned int irq_count)
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
{
struct irq_domain *msi_domain;
int error;
- if (!list_empty(dev_to_msi_list(dev)))
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
return -EINVAL;
- error = fsl_mc_msi_alloc_descs(dev, irq_count);
- if (error < 0)
+ error = msi_setup_device_data(dev);
+ if (error)
return error;
- msi_domain = dev_get_msi_domain(dev);
- if (!msi_domain) {
+ msi_lock_descs(dev);
+ if (msi_first_desc(dev, MSI_DESC_ALL))
error = -EINVAL;
- goto cleanup_msi_descs;
- }
+ msi_unlock_descs(dev);
+ if (error)
+ return error;
/*
* NOTE: Calling this function will trigger the invocation of the
@@ -258,15 +237,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
*/
error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
- if (error) {
+ if (error)
dev_err(dev, "Failed to allocate IRQs\n");
- goto cleanup_msi_descs;
- }
-
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
return error;
}
@@ -279,9 +251,4 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev)
return;
msi_domain_free_irqs(msi_domain, dev);
-
- if (list_empty(dev_to_msi_list(dev)))
- return;
-
- fsl_mc_msi_free_descs(dev);
}
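Taken together, the MSI rework means the DPRC setup path resolves the domain per device and lets the MSI core own the descriptors; roughly, and only as a sketch of the calling sequence rather than the literal dprc-driver.c code:

static int example_setup_msi(struct fsl_mc_device *mc_bus_dev,
			     unsigned int irq_count)
{
	struct irq_domain *msi_domain;

	/* resolved via msi-map (DT) or IORT (ACPI), with a parent fallback */
	msi_domain = fsl_mc_find_msi_domain(&mc_bus_dev->dev);
	if (!msi_domain)
		return -ENOENT;

	dev_set_msi_domain(&mc_bus_dev->dev, msi_domain);

	/* descriptors are now allocated and freed by the MSI core */
	return fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
}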
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 21ca8c756ee7..b3520ea1b9f4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -10,6 +10,8 @@
#include <linux/fsl/mc.h>
#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
/*
* Data Path Management Complex (DPMNG) General API
@@ -46,7 +48,6 @@ struct dpmng_rsp_get_version {
/* DPMCP command IDs */
#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
struct dpmcp_cmd_open {
@@ -80,17 +81,20 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command versioning */
#define DPRC_CMD_BASE_VERSION 1
#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_3RD_VERSION 3
#define DPRC_CMD_ID_OFFSET 4
#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_3RD_VERSION)
/* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+#define DPRC_CMDID_RESET_CONT_V2 DPRC_CMD_V2(0x005)
#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
@@ -103,6 +107,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V3 DPRC_CMD_V3(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
@@ -111,6 +116,11 @@ struct dprc_cmd_open {
__le32 container_id;
};
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+ __le32 options;
+};
+
struct dprc_cmd_set_irq {
/* cmd word 0 */
__le32 irq_val;
@@ -152,8 +162,7 @@ struct dprc_cmd_clear_irq_status {
struct dprc_rsp_get_attributes {
/* response word 0 */
__le32 container_id;
- __le16 icid;
- __le16 pad;
+ __le32 icid;
/* response word 1 */
__le32 options;
__le32 portal_id;
@@ -202,12 +211,13 @@ struct dprc_cmd_get_obj_region {
struct dprc_rsp_get_obj_region {
/* response word 0 */
- __le64 pad;
+ __le64 pad0;
/* response word 1 */
__le64 base_offset;
/* response word 2 */
__le32 size;
- __le32 pad2;
+ u8 type;
+ u8 pad2[3];
/* response word 3 */
__le32 flags;
__le32 pad3;
@@ -330,7 +340,7 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
*/
struct dprc_attributes {
int container_id;
- u16 icid;
+ u32 icid;
int portal_id;
u64 options;
};
@@ -358,12 +368,6 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
int obj_id,
u8 irq_index,
struct dprc_irq_cfg *irq_cfg);
-
-/* Region flags */
-/* Cacheable - Indicates that region should be mapped as cacheable */
-#define DPRC_REGION_CACHEABLE 0x00000001
-#define DPRC_REGION_SHAREABLE 0x00000002
-
/**
* enum dprc_region_type - Region type
* @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
@@ -447,7 +451,6 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
/* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
-#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
@@ -486,7 +489,6 @@ struct dpbp_rsp_get_attributes {
/* Command IDs */
#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
-#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
@@ -518,11 +520,41 @@ struct dpcon_cmd_set_notification {
__le64 user_ctx;
};
-/**
- * Maximum number of total IRQs that can be pre-allocated for an MC bus'
- * IRQ pool
+/*
+ * Generic FSL MC API
*/
-#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+/* generic command versioning */
+#define OBJ_CMD_BASE_VERSION 1
+#define OBJ_CMD_ID_OFFSET 4
+
+#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
+
+/* open command codes */
+#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
+#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
+#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
+#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
+#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
+#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
+#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
+#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
+#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
+#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
+#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
+#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
+#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
+#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
+#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
+#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
+
+/* Generic object command IDs */
+#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
+#define OBJ_CMDID_RESET OBJ_CMD(0x005)
+
+struct fsl_mc_obj_cmd_open {
+ __le32 obj_id;
+};
/**
* struct fsl_mc_resource_pool - Pool of MC resources of a given
@@ -544,6 +576,22 @@ struct fsl_mc_resource_pool {
};
/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
* struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
* @mc_dev: fsl-mc device for the bus device itself.
* @resource_pools: array of resource pools (one pool per resource type)
@@ -552,6 +600,7 @@ struct fsl_mc_resource_pool {
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
* @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
@@ -559,6 +608,8 @@ struct fsl_mc_bus {
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
};
#define to_fsl_mc_bus(_mc_dev) \
@@ -575,6 +626,9 @@ int __init dprc_driver_init(void);
void dprc_driver_exit(void);
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_allocator_driver_exit(void);
@@ -595,13 +649,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
void fsl_mc_msi_domain_free_irqs(struct device *dev);
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain);
-
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
- unsigned int irq_count);
-
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
int __must_check fsl_create_mc_io(struct device *dev,
phys_addr_t mc_portal_phys_addr,
@@ -613,7 +661,35 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev);
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_UAPI_SUPPORT
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus);
+
+#else
+
+static inline int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ return 0;
+}
+
+static inline void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+}
+
+#endif
+
+int disable_dprc_irq(struct fsl_mc_device *mc_dev);
+int enable_dprc_irq(struct fsl_mc_device *mc_dev);
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
new file mode 100644
index 000000000000..9c4c1395fcdb
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Complex (MC) userspace support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "fsl-mc-private.h"
+
+struct uapi_priv_data {
+ struct fsl_mc_uapi *uapi;
+ struct fsl_mc_io *mc_io;
+};
+
+struct fsl_mc_cmd_desc {
+ u16 cmdid_value;
+ u16 cmdid_mask;
+ int size;
+ bool token;
+ int flags;
+};
+
+#define FSL_MC_CHECK_MODULE_ID BIT(0)
+#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
+
+enum fsl_mc_cmd_index {
+ DPDBG_DUMP = 0,
+ DPDBG_SET,
+ DPRC_GET_CONTAINER_ID,
+ DPRC_CREATE_CONT,
+ DPRC_DESTROY_CONT,
+ DPRC_ASSIGN,
+ DPRC_UNASSIGN,
+ DPRC_GET_OBJ_COUNT,
+ DPRC_GET_OBJ,
+ DPRC_GET_RES_COUNT,
+ DPRC_GET_RES_IDS,
+ DPRC_SET_OBJ_LABEL,
+ DPRC_SET_LOCKED,
+ DPRC_CONNECT,
+ DPRC_DISCONNECT,
+ DPRC_GET_POOL,
+ DPRC_GET_POOL_COUNT,
+ DPRC_GET_CONNECTION,
+ DPCI_GET_LINK_STATE,
+ DPCI_GET_PEER_ATTR,
+ DPAIOP_GET_SL_VERSION,
+ DPAIOP_GET_STATE,
+ DPMNG_GET_VERSION,
+ DPSECI_GET_TX_QUEUE,
+ DPMAC_GET_COUNTER,
+ DPMAC_GET_MAC_ADDR,
+ DPNI_SET_PRIM_MAC,
+ DPNI_GET_PRIM_MAC,
+ DPNI_GET_STATISTICS,
+ DPNI_GET_LINK_STATE,
+ DPNI_GET_MAX_FRAME_LENGTH,
+ DPSW_GET_TAILDROP,
+ DPSW_SET_TAILDROP,
+ DPSW_IF_GET_COUNTER,
+ DPSW_IF_GET_MAX_FRAME_LENGTH,
+ DPDMUX_GET_COUNTER,
+ DPDMUX_IF_GET_MAX_FRAME_LENGTH,
+ GET_ATTR,
+ GET_IRQ_MASK,
+ GET_IRQ_STATUS,
+ CLOSE,
+ OPEN,
+ GET_API_VERSION,
+ DESTROY,
+ CREATE,
+};
+
+static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ [DPDBG_DUMP] = {
+ .cmdid_value = 0x1300,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPDBG_SET] = {
+ .cmdid_value = 0x1400,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPRC_GET_CONTAINER_ID] = {
+ .cmdid_value = 0x8300,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPRC_CREATE_CONT] = {
+ .cmdid_value = 0x1510,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DESTROY_CONT] = {
+ .cmdid_value = 0x1520,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_ASSIGN] = {
+ .cmdid_value = 0x1570,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_UNASSIGN] = {
+ .cmdid_value = 0x1580,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_OBJ_COUNT] = {
+ .cmdid_value = 0x1590,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ },
+ [DPRC_GET_OBJ] = {
+ .cmdid_value = 0x15A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_RES_COUNT] = {
+ .cmdid_value = 0x15B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+ [DPRC_GET_RES_IDS] = {
+ .cmdid_value = 0x15C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ },
+ [DPRC_SET_OBJ_LABEL] = {
+ .cmdid_value = 0x1610,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 48,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_SET_LOCKED] = {
+ .cmdid_value = 0x16B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_CONNECT] = {
+ .cmdid_value = 0x1670,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 56,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DISCONNECT] = {
+ .cmdid_value = 0x1680,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_POOL] = {
+ .cmdid_value = 0x1690,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_POOL_COUNT] = {
+ .cmdid_value = 0x16A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPRC_GET_CONNECTION] = {
+ .cmdid_value = 0x16C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+
+ [DPCI_GET_LINK_STATE] = {
+ .cmdid_value = 0x0E10,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPCI_GET_PEER_ATTR] = {
+ .cmdid_value = 0x0E20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_SL_VERSION] = {
+ .cmdid_value = 0x2820,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_STATE] = {
+ .cmdid_value = 0x2830,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPMNG_GET_VERSION] = {
+ .cmdid_value = 0x8310,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPSECI_GET_TX_QUEUE] = {
+ .cmdid_value = 0x1970,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPMAC_GET_COUNTER] = {
+ .cmdid_value = 0x0c40,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 9,
+ },
+ [DPMAC_GET_MAC_ADDR] = {
+ .cmdid_value = 0x0c50,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_SET_PRIM_MAC] = {
+ .cmdid_value = 0x2240,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPNI_GET_PRIM_MAC] = {
+ .cmdid_value = 0x2250,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_STATISTICS] = {
+ .cmdid_value = 0x25D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPNI_GET_LINK_STATE] = {
+ .cmdid_value = 0x2150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x2170,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPSW_GET_TAILDROP] = {
+ .cmdid_value = 0x0A80,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPSW_SET_TAILDROP] = {
+ .cmdid_value = 0x0A90,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 24,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPSW_IF_GET_COUNTER] = {
+ .cmdid_value = 0x0340,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPSW_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0450,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPDMUX_GET_COUNTER] = {
+ .cmdid_value = 0x0b20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0a20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [GET_ATTR] = {
+ .cmdid_value = 0x0040,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [GET_IRQ_MASK] = {
+ .cmdid_value = 0x0150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [GET_IRQ_STATUS] = {
+ .cmdid_value = 0x0160,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [CLOSE] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+
+ /* Common commands amongst all types of objects. Must be checked last. */
+ [OPEN] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [GET_API_VERSION] = {
+ .cmdid_value = 0xA000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 8,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [DESTROY] = {
+ .cmdid_value = 0x9800,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [CREATE] = {
+ .cmdid_value = 0x9000,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 64,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+};
+
+#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
+
+#define FSL_MC_MAX_MODULE_ID 0x10
+
+static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_command *mc_cmd)
+{
+ struct fsl_mc_cmd_desc *desc = NULL;
+ int mc_cmd_max_size, i;
+ bool token_provided;
+ u16 cmdid, module_id;
+ char *mc_cmd_end;
+ char sum = 0;
+
+ /* Check if this is an accepted MC command */
+ cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
+ for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
+ desc = &fsl_mc_accepted_cmds[i];
+ if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
+ break;
+ }
+ if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
+ return -EACCES;
+ }
+
+ /* Check if the size of the command is honored. Anything beyond the
+ * last valid byte of the command should be zeroed.
+ */
+ mc_cmd_max_size = sizeof(*mc_cmd);
+ mc_cmd_end = ((char *)mc_cmd) + desc->size;
+ for (i = desc->size; i < mc_cmd_max_size; i++)
+ sum |= *mc_cmd_end++;
+ if (sum) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
+ cmdid, desc->size);
+ return -EACCES;
+ }
+
+ /* Some MC commands request a token to be passed so that object
+ * identification is possible. Check if the token passed in the command
+ * is as expected.
+ */
+ token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
+ if (token_provided != desc->token) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
+ cmdid, mc_cmd_hdr_read_token(mc_cmd));
+ return -EACCES;
+ }
+
+ /* If needed, check if the module ID passed is valid */
+ if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
+ /* The module ID is represented by bits [4:9] from the cmdid */
+ module_id = (cmdid & GENMASK(9, 4)) >> 4;
+ if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
+ cmdid, module_id);
+ return -EACCES;
+ }
+ }
+
+ /* Some commands alter how hardware resources are managed. For these
+ * commands, check for CAP_NET_ADMIN.
+ */
+ if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
+ if (!capable(CAP_NET_ADMIN)) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
+ cmdid);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
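To make the matching rules concrete: a command id is masked and compared against each descriptor in order, so 0x15A1 (DPRC_GET_OBJ, command version 1) hits the DPRC_GET_OBJ entry because (0x15A1 & 0xFFF0) == 0x15A0, while 0x8051 (DPRC open) falls through to the generic OPEN entry (mask 0xFC00) and then has its module id, bits 4-9, validated (0x05 here). The lookup step alone could be written as the helper below, which simply restates the loop at the top of fsl_mc_command_check():

/* illustrative only: return the descriptor matching @cmdid, or NULL */
static struct fsl_mc_cmd_desc *example_match_cmdid(u16 cmdid)
{
	int i;

	for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
		struct fsl_mc_cmd_desc *desc = &fsl_mc_accepted_cmds[i];

		if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
			return desc;
	}

	return NULL;
}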
+
+static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
+ struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_command mc_cmd;
+ int error;
+
+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ error = fsl_mc_command_check(mc_dev, &mc_cmd);
+ if (error)
+ return error;
+
+ error = mc_send_command(mc_io, &mc_cmd);
+ if (error)
+ return error;
+
+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
+{
+ struct fsl_mc_device *root_mc_device;
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_io *dynamic_mc_io;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
+ mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (!mc_uapi->local_instance_in_use) {
+ priv_data->mc_io = mc_uapi->static_mc_io;
+ mc_uapi->local_instance_in_use = 1;
+ } else {
+ error = fsl_mc_portal_allocate(root_mc_device, 0,
+ &dynamic_mc_io);
+ if (error) {
+ dev_dbg(&root_mc_device->dev,
+ "Could not allocate MC portal\n");
+ goto error_portal_allocate;
+ }
+
+ priv_data->mc_io = dynamic_mc_io;
+ }
+ priv_data->uapi = mc_uapi;
+ filep->private_data = priv_data;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+
+error_portal_allocate:
+ mutex_unlock(&mc_uapi->mutex);
+ kfree(priv_data);
+
+ return error;
+}
+
+static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
+{
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_io *mc_io;
+
+ priv_data = filep->private_data;
+ mc_uapi = priv_data->uapi;
+ mc_io = priv_data->mc_io;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (mc_io == mc_uapi->static_mc_io)
+ mc_uapi->local_instance_in_use = 0;
+ else
+ fsl_mc_portal_free(mc_io);
+
+ kfree(filep->private_data);
+ filep->private_data = NULL;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+}
+
+static long fsl_mc_uapi_dev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uapi_priv_data *priv_data = file->private_data;
+ struct fsl_mc_device *root_mc_device;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ switch (cmd) {
+ case FSL_MC_SEND_MC_COMMAND:
+ error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
+ break;
+ default:
+ dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+static const struct file_operations fsl_mc_uapi_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_mc_uapi_dev_open,
+ .release = fsl_mc_uapi_dev_release,
+ .unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
+};
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
+ struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
+ int error;
+
+ mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
+ mc_uapi->misc.name = dev_name(&mc_dev->dev);
+ mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
+
+ error = misc_register(&mc_uapi->misc);
+ if (error)
+ return error;
+
+ mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
+
+ mutex_init(&mc_uapi->mutex);
+
+ return 0;
+}
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+ misc_deregister(&mc_bus->uapi_misc.misc);
+}
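User space drives this interface through the FSL_MC_SEND_MC_COMMAND ioctl on the misc device created for the root DPRC (typically /dev/dprc.1). The sketch below is a hedged illustration, assuming a little-endian host and the command/ioctl definitions from include/uapi/linux/fsl_mc.h; the raw header encoding mirrors mc_encode_cmd_header():

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsl_mc.h>	/* struct fsl_mc_command, FSL_MC_SEND_MC_COMMAND */

int main(void)
{
	struct fsl_mc_command cmd = { 0 };
	int fd, ret;

	fd = open("/dev/dprc.1", O_RDWR);
	if (fd < 0)
		return 1;

	/*
	 * 0x8311 is DPMNG_GET_VERSION (command 0x831, version 1): the command
	 * id sits in the top 16 bits of the little-endian header, no token is
	 * needed, and all parameter bytes must stay zeroed.
	 */
	cmd.header = (__u64)0x8311 << 48;

	ret = ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd);
	if (!ret)
		printf("raw version words: 0x%llx 0x%llx\n",
		       (unsigned long long)cmd.params[0],
		       (unsigned long long)cmd.params[1]);

	close(fd);
	return ret ? 1 : 0;
}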
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 6ae48ad80409..95b10a6cf307 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -50,12 +50,12 @@ static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
}
/**
- * Creates an MC I/O object
+ * fsl_create_mc_io() - Creates an MC I/O object
*
* @dev: device to be associated with the MC I/O object
* @mc_portal_phys_addr: physical address of the MC portal to use
* @mc_portal_size: size in bytes of the MC portal
- * @dpmcp-dev: Pointer to the DPMCP object associated with this MC I/O
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
* object or NULL if none.
* @flags: flags for the new MC I/O object
* @new_mc_io: Area to return pointer to newly created MC I/O object
@@ -82,7 +82,7 @@ int __must_check fsl_create_mc_io(struct device *dev,
mc_io->portal_phys_addr = mc_portal_phys_addr;
mc_io->portal_size = mc_portal_size;
if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_init(&mc_io->spinlock);
+ raw_spin_lock_init(&mc_io->spinlock);
else
mutex_init(&mc_io->mutex);
@@ -123,13 +123,18 @@ error_destroy_mc_io:
}
/**
- * Destroys an MC I/O object
+ * fsl_destroy_mc_io() - Destroys an MC I/O object
*
* @mc_io: MC I/O object to destroy
*/
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
if (dpmcp_dev)
fsl_mc_io_unset_dpmcp(mc_io);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 3221a7fbaf0a..f2052cd0a051 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -16,7 +16,7 @@
#include "fsl-mc-private.h"
-/**
+/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
#define MC_CMD_COMPLETION_TIMEOUT_MS 500
@@ -35,7 +35,7 @@ static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
return (enum mc_cmd_status)hdr->status;
}
-static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
{
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
u16 cmd_id = le16_to_cpu(hdr->cmd_id);
@@ -148,9 +148,10 @@ static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
}
/**
- * Waits for the completion of an MC command doing preemptible polling.
- * uslepp_range() is called between polling iterations.
- *
+ * mc_polling_wait_preemptible() - Waits for the completion of an MC
+ * command doing preemptible polling.
+ *                                 usleep_range() is called between
+ * polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
@@ -194,9 +195,9 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
}
/**
- * Waits for the completion of an MC command doing atomic polling.
- * udelay() is called between polling iterations.
- *
+ * mc_polling_wait_atomic() - Waits for the completion of an MC command
+ * doing atomic polling. udelay() is called
+ * between polling iterations.
* @mc_io: MC I/O object to be used
* @cmd: command buffer to receive MC response
* @mc_status: MC command completion status
@@ -234,8 +235,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
}
/**
- * Sends a command to the MC device using the given MC I/O object
- *
+ * mc_send_command() - Sends a command to the MC device using the given
+ * MC I/O object
* @mc_io: MC I/O object to be used
* @cmd: command to be sent
*
@@ -251,7 +252,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
else
mutex_lock(&mc_io->mutex);
@@ -287,7 +288,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
error = 0;
common_exit:
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
else
mutex_unlock(&mc_io->mutex);
diff --git a/drivers/bus/fsl-mc/obj-api.c b/drivers/bus/fsl-mc/obj-api.c
new file mode 100644
index 000000000000..06c1dd84e38d
--- /dev/null
+++ b/drivers/bus/fsl-mc/obj-api.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_get_open_cmd_id(const char *type)
+{
+ static const struct {
+ int cmd_id;
+ const char *type;
+ } dev_ids[] = {
+ { DPRTC_CMDID_OPEN, "dprtc" },
+ { DPRC_CMDID_OPEN, "dprc" },
+ { DPNI_CMDID_OPEN, "dpni" },
+ { DPIO_CMDID_OPEN, "dpio" },
+ { DPSW_CMDID_OPEN, "dpsw" },
+ { DPBP_CMDID_OPEN, "dpbp" },
+ { DPCON_CMDID_OPEN, "dpcon" },
+ { DPMCP_CMDID_OPEN, "dpmcp" },
+ { DPMAC_CMDID_OPEN, "dpmac" },
+ { DPSECI_CMDID_OPEN, "dpseci" },
+ { DPDMUX_CMDID_OPEN, "dpdmux" },
+ { DPDCEI_CMDID_OPEN, "dpdcei" },
+ { DPAIOP_CMDID_OPEN, "dpaiop" },
+ { DPCI_CMDID_OPEN, "dpci" },
+ { DPDMAI_CMDID_OPEN, "dpdmai" },
+ { DPDBG_CMDID_OPEN, "dpdbg" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_ids[i].type; i++)
+ if (!strcmp(dev_ids[i].type, type))
+ return dev_ids[i].cmd_id;
+
+ return -1;
+}
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct fsl_mc_obj_cmd_open *cmd_params;
+ int err = 0;
+ int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
+
+ if (cmd_id == -1)
+ return -ENODEV;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
+ cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+
+	/* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
+ token);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
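
Note: the helpers above give any fsl-mc driver a generic open/reset/close sequence keyed on the object type string. A minimal in-kernel sketch of a consumer is shown below; it is illustrative only and assumes mc_dev is a bound struct fsl_mc_device with a valid mc_io portal.

/* Sketch only: open an object by its type/id, reset it, then close it. */
#include <linux/fsl/mc.h>

static int example_reset_mc_object(struct fsl_mc_device *mc_dev)
{
	u16 token;
	int err;

	err = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
			      mc_dev->obj_desc.type, &token);
	if (err)
		return err;

	err = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);

	/* Close the object even if the reset failed. */
	fsl_mc_obj_close(mc_dev->mc_io, 0, token);

	return err;
}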
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 8101df901830..5b65a48f17e7 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -85,7 +85,7 @@ static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
ndelay(LPC_NSEC_PERWAIT);
} while (--waitcnt);
- return -ETIME;
+ return -ETIMEDOUT;
}
/*
@@ -347,7 +347,7 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
unsigned long sys_port;
resource_size_t len = resource_size(res);
- sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len);
+ sys_port = logic_pio_trans_hwaddr(acpi_fwnode_handle(host), res->start, len);
if (sys_port == ~0UL)
return -EFAULT;
@@ -358,8 +358,28 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
}
/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
- * @child: the device node to be updated the I/O resource
+ * @adev: ACPI companion of the device node whose I/O resource is to be updated
* @hostdev: the device node associated with host controller
* @res: double pointer to be set to the address of translated resources
* @num_res: pointer to variable to hold the number of translated resources
@@ -370,31 +390,24 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
* host-relative address resource. This function will return the translated
* logical PIO addresses for each child devices resources.
*/
-static int hisi_lpc_acpi_set_io_res(struct device *child,
+static int hisi_lpc_acpi_set_io_res(struct acpi_device *adev,
struct device *hostdev,
const struct resource **res, int *num_res)
{
- struct acpi_device *adev;
- struct acpi_device *host;
+ struct acpi_device *host = to_acpi_device(adev->dev.parent);
struct resource_entry *rentry;
LIST_HEAD(resource_list);
struct resource *resources;
int count;
int i;
- if (!child || !hostdev)
- return -EINVAL;
-
- host = to_acpi_device(hostdev);
- adev = to_acpi_device(child);
-
if (!adev->status.present) {
- dev_dbg(child, "device is not present\n");
+ dev_dbg(&adev->dev, "device is not present\n");
return -EIO;
}
if (acpi_device_enumerated(adev)) {
- dev_dbg(child, "has been enumerated\n");
+ dev_dbg(&adev->dev, "has been enumerated\n");
return -EIO;
}
@@ -405,7 +418,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
*/
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count <= 0) {
- dev_dbg(child, "failed to get resources\n");
+ dev_dbg(&adev->dev, "failed to get resources\n");
return count ? count : -EIO;
}
@@ -418,8 +431,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
return -ENOMEM;
}
count = 0;
- list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = *rentry->res;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
acpi_dev_free_resource_list(&resource_list);
@@ -431,7 +447,7 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
continue;
ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
if (ret) {
- dev_err(child, "translate IO range %pR failed (%d)\n",
+ dev_err(&adev->dev, "translate IO range %pR failed (%d)\n",
&resources[i], ret);
return ret;
}
@@ -448,22 +464,103 @@ static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused)
return 0;
}
+static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_used)
+{
+ acpi_device_clear_enumerated(adev);
+ return 0;
+}
+
struct hisi_lpc_acpi_cell {
const char *hid;
- const char *name;
- void *pdata;
- size_t pdata_size;
+ const struct platform_device_info *pdevinfo;
};
static void hisi_lpc_acpi_remove(struct device *hostdev)
{
- struct acpi_device *adev = ACPI_COMPANION(hostdev);
- struct acpi_device *child;
-
device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+ acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_clear_enumerated, NULL);
+}
+
+static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
+{
+ const char *hid = acpi_device_hid(child);
+ struct device *hostdev = data;
+ const struct hisi_lpc_acpi_cell *cell;
+ struct platform_device *pdev;
+ const struct resource *res;
+ bool found = false;
+ int num_res;
+ int ret;
+
+ ret = hisi_lpc_acpi_set_io_res(child, hostdev, &res, &num_res);
+ if (ret) {
+ dev_warn(hostdev, "set resource fail (%d)\n", ret);
+ return ret;
+ }
+
+ cell = (struct hisi_lpc_acpi_cell []){
+ /* ipmi */
+ {
+ .hid = "IPI0001",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "hisi-lpc-ipmi",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ },
+ },
+ },
+ /* 8250-compatible uart */
+ {
+ .hid = "HISI1031",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "serial8250",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ .data = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {}
+ },
+ .size_data = 2 * sizeof(struct plat_serial8250_port),
+ },
+ },
+ },
+ {}
+ };
+
+ for (; cell && cell->hid; cell++) {
+ if (!strcmp(cell->hid, hid)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(hostdev,
+ "could not find cell for child device (%s), discarding\n",
+ hid);
+ return 0;
+ }
- list_for_each_entry(child, &adev->children, node)
- acpi_device_clear_enumerated(child);
+ pdev = platform_device_register_full(cell->pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ acpi_device_set_enumerated(child);
+ return 0;
}
/*
@@ -478,101 +575,16 @@ static void hisi_lpc_acpi_remove(struct device *hostdev)
*/
static int hisi_lpc_acpi_probe(struct device *hostdev)
{
- struct acpi_device *adev = ACPI_COMPANION(hostdev);
- struct acpi_device *child;
int ret;
/* Only consider the children of the host */
- list_for_each_entry(child, &adev->children, node) {
- const char *hid = acpi_device_hid(child);
- const struct hisi_lpc_acpi_cell *cell;
- struct platform_device *pdev;
- const struct resource *res;
- bool found = false;
- int num_res;
-
- ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, &res,
- &num_res);
- if (ret) {
- dev_warn(hostdev, "set resource fail (%d)\n", ret);
- goto fail;
- }
-
- cell = (struct hisi_lpc_acpi_cell []){
- /* ipmi */
- {
- .hid = "IPI0001",
- .name = "hisi-lpc-ipmi",
- },
- /* 8250-compatible uart */
- {
- .hid = "HISI1031",
- .name = "serial8250",
- .pdata = (struct plat_serial8250_port []) {
- {
- .iobase = res->start,
- .uartclk = 1843200,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF,
- },
- {}
- },
- .pdata_size = 2 *
- sizeof(struct plat_serial8250_port),
- },
- {}
- };
-
- for (; cell && cell->name; cell++) {
- if (!strcmp(cell->hid, hid)) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_warn(hostdev,
- "could not find cell for child device (%s), discarding\n",
- hid);
- continue;
- }
-
- pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
- if (!pdev) {
- ret = -ENOMEM;
- goto fail;
- }
-
- pdev->dev.parent = hostdev;
- ACPI_COMPANION_SET(&pdev->dev, child);
-
- ret = platform_device_add_resources(pdev, res, num_res);
- if (ret)
- goto fail;
-
- ret = platform_device_add_data(pdev, cell->pdata,
- cell->pdata_size);
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
-
- acpi_device_set_enumerated(child);
- }
-
- return 0;
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_add_child, hostdev);
+ if (ret)
+ hisi_lpc_acpi_remove(hostdev);
-fail:
- hisi_lpc_acpi_remove(hostdev);
return ret;
}
-
-static const struct acpi_device_id hisi_lpc_acpi_match[] = {
- {"HISI0191"},
- {}
-};
#else
static int hisi_lpc_acpi_probe(struct device *dev)
{
@@ -594,11 +606,9 @@ static void hisi_lpc_acpi_remove(struct device *hostdev)
static int hisi_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct logic_pio_hwaddr *range;
struct hisi_lpc_dev *lpcdev;
resource_size_t io_end;
- struct resource *res;
int ret;
lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
@@ -607,8 +617,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
spin_lock_init(&lpcdev->cycle_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpcdev->membase = devm_ioremap_resource(dev, res);
+ lpcdev->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpcdev->membase))
return PTR_ERR(lpcdev->membase);
@@ -616,7 +625,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
if (!range)
return -ENOMEM;
- range->fwnode = dev->fwnode;
+ range->fwnode = dev_fwnode(dev);
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
range->hostdata = lpcdev;
@@ -630,7 +639,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
}
/* register the LPC host PIO resources */
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -651,11 +660,10 @@ static int hisi_lpc_probe(struct platform_device *pdev)
static int hisi_lpc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
struct logic_pio_hwaddr *range = lpcdev->io_host;
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
hisi_lpc_acpi_remove(dev);
else
of_platform_depopulate(dev);
@@ -671,11 +679,16 @@ static const struct of_device_id hisi_lpc_of_match[] = {
{}
};
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+
static struct platform_driver hisi_lpc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = hisi_lpc_of_match,
- .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ .acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
.remove = hisi_lpc_remove,
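
Note: with the conversion above, the driver walks its ACPI children through acpi_dev_for_each_child() instead of iterating the children list directly; the callback runs once per child and any non-zero return aborts the walk and is propagated to the caller. A stripped-down sketch of that pattern, unrelated to this driver, is shown below.

/* Sketch: count the present ACPI children of a device's companion. */
#include <linux/acpi.h>

static int example_count_present(struct acpi_device *child, void *data)
{
	int *count = data;

	if (child->status.present)
		(*count)++;

	return 0;	/* a non-zero return would stop the iteration */
}

static int example_count_children(struct device *dev)
{
	int count = 0;
	int ret;

	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      example_count_present, &count);
	return ret ? ret : count;
}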
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 28bb65a5613f..828c66bbaa67 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -21,6 +21,7 @@ struct imx_weim_devtype {
unsigned int cs_stride;
unsigned int wcr_offset;
unsigned int wcr_bcm;
+ unsigned int wcr_cont_bclk;
};
static const struct imx_weim_devtype imx1_weim_devtype = {
@@ -41,6 +42,7 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_stride = 0x18,
.wcr_offset = 0x90,
.wcr_bcm = BIT(0),
+ .wcr_cont_bclk = BIT(3),
};
static const struct imx_weim_devtype imx51_weim_devtype = {
@@ -62,6 +64,11 @@ struct cs_timing_state {
struct cs_timing cs[MAX_CS_COUNT];
};
+struct weim_priv {
+ void __iomem *base;
+ struct cs_timing_state timing_state;
+};
+
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
{ .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
@@ -126,21 +133,26 @@ err:
}
/* Parse and set the timing for this device. */
-static int weim_timing_setup(struct device *dev,
- struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype,
- struct cs_timing_state *ts)
+static int weim_timing_setup(struct device *dev, struct device_node *np,
+ const struct imx_weim_devtype *devtype)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
int reg_idx, num_regs;
struct cs_timing *cst;
+ struct weim_priv *priv;
+ struct cs_timing_state *ts;
+ void __iomem *base;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
+ priv = dev_get_drvdata(dev);
+ base = priv->base;
+ ts = &priv->timing_state;
+
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
value, devtype->cs_regs_count);
if (ret)
@@ -187,14 +199,15 @@ static int weim_timing_setup(struct device *dev,
return 0;
}
-static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
+static int weim_parse_dt(struct platform_device *pdev)
{
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
int ret, have_child = 0;
- struct cs_timing_state ts = {};
+ struct weim_priv *priv;
+ void __iomem *base;
u32 reg;
if (devtype == &imx50_weim_devtype) {
@@ -203,11 +216,26 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
return ret;
}
+ priv = dev_get_drvdata(&pdev->dev);
+ base = priv->base;
+
if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
if (devtype->wcr_bcm) {
reg = readl(base + devtype->wcr_offset);
- writel(reg | devtype->wcr_bcm,
- base + devtype->wcr_offset);
+ reg |= devtype->wcr_bcm;
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "fsl,continuous-burst-clk")) {
+ if (devtype->wcr_cont_bclk) {
+ reg |= devtype->wcr_cont_bclk;
+ } else {
+ dev_err(&pdev->dev,
+ "continuous burst clk not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ writel(reg, base + devtype->wcr_offset);
} else {
dev_err(&pdev->dev, "burst clk mode not supported.\n");
return -EINVAL;
@@ -215,7 +243,7 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
- ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
+ ret = weim_timing_setup(&pdev->dev, child, devtype);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
@@ -234,17 +262,25 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
static int weim_probe(struct platform_device *pdev)
{
+ struct weim_priv *priv;
struct resource *res;
struct clk *clk;
void __iomem *base;
int ret;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
/* get the resource */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->base = base;
+ dev_set_drvdata(&pdev->dev, priv);
+
/* get the clock */
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
@@ -255,7 +291,7 @@ static int weim_probe(struct platform_device *pdev)
return ret;
/* parse the device node */
- ret = weim_parse_dt(pdev, base);
+ ret = weim_parse_dt(pdev);
if (ret)
clk_disable_unprepare(clk);
else
@@ -264,6 +300,80 @@ static int weim_probe(struct platform_device *pdev)
return ret;
}
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int of_weim_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ const struct imx_weim_devtype *devtype;
+ struct of_reconfig_data *rd = arg;
+ const struct of_device_id *of_id;
+ struct platform_device *pdev;
+ int ret = NOTIFY_OK;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ devtype = of_id->data;
+
+ pdev = of_find_device_by_node(rd->dn->parent);
+ if (!pdev) {
+ pr_err("%s: could not find platform device for '%pOF'\n",
+ __func__, rd->dn->parent);
+
+ return notifier_from_errno(-EINVAL);
+ }
+
+ if (weim_timing_setup(&pdev->dev, rd->dn, devtype))
+ dev_warn(&pdev->dev,
+ "Failed to setup timing for '%pOF'\n", rd->dn);
+
+ if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
+ dev_err(&pdev->dev,
+ "Failed to create child device '%pOF'\n",
+ rd->dn);
+ ret = notifier_from_errno(-EINVAL);
+ }
+ }
+
+ platform_device_put(pdev);
+
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK; /* device already destroyed */
+
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ pdev = of_find_device_by_node(rd->dn);
+ if (!pdev) {
+ pr_err("Could not find platform device for '%pOF'\n",
+ rd->dn);
+
+ ret = notifier_from_errno(-EINVAL);
+ } else {
+ of_platform_device_destroy(&pdev->dev, NULL);
+ platform_device_put(pdev);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct notifier_block weim_of_notifier = {
+ .notifier_call = of_weim_notify,
+};
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
static struct platform_driver weim_driver = {
.driver = {
.name = "imx-weim",
@@ -271,7 +381,27 @@ static struct platform_driver weim_driver = {
},
.probe = weim_probe,
};
-module_platform_driver(weim_driver);
+
+static int __init weim_init(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ WARN_ON(of_reconfig_notifier_register(&weim_of_notifier));
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+ return platform_driver_register(&weim_driver);
+}
+module_init(weim_init);
+
+static void __exit weim_exit(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ of_reconfig_notifier_unregister(&weim_of_notifier);
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+	platform_driver_unregister(&weim_driver);
+}
+module_exit(weim_exit);
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_DESCRIPTION("i.MX EIM Controller Driver");
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
new file mode 100644
index 000000000000..a4388440aca7
--- /dev/null
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Expansion Bus Controller
+ * Copyright (C) 2021 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IXP4XX_EXP_NUM_CS 8
+
+#define IXP4XX_EXP_TIMING_CS0 0x00
+#define IXP4XX_EXP_TIMING_CS1 0x04
+#define IXP4XX_EXP_TIMING_CS2 0x08
+#define IXP4XX_EXP_TIMING_CS3 0x0c
+#define IXP4XX_EXP_TIMING_CS4 0x10
+#define IXP4XX_EXP_TIMING_CS5 0x14
+#define IXP4XX_EXP_TIMING_CS6 0x18
+#define IXP4XX_EXP_TIMING_CS7 0x1c
+
+/* Bits inside each CS timing register */
+#define IXP4XX_EXP_TIMING_STRIDE 0x04
+#define IXP4XX_EXP_CS_EN BIT(31)
+#define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */
+#define IXP4XX_EXP_T1_MASK		GENMASK(29, 28)
+#define IXP4XX_EXP_T1_SHIFT 28
+#define IXP4XX_EXP_T2_MASK GENMASK(27, 26)
+#define IXP4XX_EXP_T2_SHIFT 26
+#define IXP4XX_EXP_T3_MASK GENMASK(25, 22)
+#define IXP4XX_EXP_T3_SHIFT 22
+#define IXP4XX_EXP_T4_MASK GENMASK(21, 20)
+#define IXP4XX_EXP_T4_SHIFT 20
+#define IXP4XX_EXP_T5_MASK GENMASK(19, 16)
+#define IXP4XX_EXP_T5_SHIFT 16
+#define IXP4XX_EXP_CYC_TYPE_MASK GENMASK(15, 14)
+#define IXP4XX_EXP_CYC_TYPE_SHIFT 14
+#define IXP4XX_EXP_SIZE_MASK GENMASK(13, 10)
+#define IXP4XX_EXP_SIZE_SHIFT 10
+#define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */
+#define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x */
+#define IXP4XX_EXP_BYTE_RD16 BIT(6)
+#define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */
+#define IXP4XX_EXP_MUX_EN BIT(4)
+#define IXP4XX_EXP_SPLT_EN BIT(3)
+#define IXP4XX_EXP_WORD BIT(2) /* Always zero */
+#define IXP4XX_EXP_WR_EN BIT(1)
+#define IXP4XX_EXP_BYTE_EN BIT(0)
+#define IXP42X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
+#define IXP43X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
+
+#define IXP4XX_EXP_CNFG0 0x20
+#define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31)
+#define IXP4XX_EXP_CNFG1 0x24
+
+#define IXP4XX_EXP_BOOT_BASE 0x00000000
+#define IXP4XX_EXP_NORMAL_BASE 0x50000000
+#define IXP4XX_EXP_STRIDE 0x01000000
+
+/* Fuses on the IXP43x */
+#define IXP43X_EXP_UNIT_FUSE_RESET 0x28
+#define IXP43x_EXP_FUSE_SPEED_MASK GENMASK(23, 22)
+
+/* Number of device tree values in "reg" */
+#define IXP4XX_OF_REG_SIZE 3
+
+struct ixp4xx_eb {
+ struct device *dev;
+ struct regmap *rmap;
+ u32 bus_base;
+ bool is_42x;
+ bool is_43x;
+};
+
+struct ixp4xx_exp_tim_prop {
+ const char *prop;
+ u32 max;
+ u32 mask;
+ u16 shift;
+};
+
+static const struct ixp4xx_exp_tim_prop ixp4xx_exp_tim_props[] = {
+ {
+ .prop = "intel,ixp4xx-eb-t1",
+ .max = 3,
+ .mask = IXP4XX_EXP_T1_MASK,
+ .shift = IXP4XX_EXP_T1_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t2",
+ .max = 3,
+ .mask = IXP4XX_EXP_T2_MASK,
+ .shift = IXP4XX_EXP_T2_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t3",
+ .max = 15,
+ .mask = IXP4XX_EXP_T3_MASK,
+ .shift = IXP4XX_EXP_T3_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t4",
+ .max = 3,
+ .mask = IXP4XX_EXP_T4_MASK,
+ .shift = IXP4XX_EXP_T4_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t5",
+ .max = 15,
+ .mask = IXP4XX_EXP_T5_MASK,
+ .shift = IXP4XX_EXP_T5_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-byte-access-on-halfword",
+ .max = 1,
+ .mask = IXP4XX_EXP_BYTE_RD16,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-hpi-hrdy-pol-high",
+ .max = 1,
+ .mask = IXP4XX_EXP_HRDY_POL,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-mux-address-and-data",
+ .max = 1,
+ .mask = IXP4XX_EXP_MUX_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-ahb-split-transfers",
+ .max = 1,
+ .mask = IXP4XX_EXP_SPLT_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-write-enable",
+ .max = 1,
+ .mask = IXP4XX_EXP_WR_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-byte-access",
+ .max = 1,
+ .mask = IXP4XX_EXP_BYTE_EN,
+ },
+};
+
+static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
+ struct device_node *np,
+ u32 cs_index,
+ u32 cs_size)
+{
+ u32 cs_cfg;
+ u32 val;
+ u32 cur_cssize;
+ u32 cs_order;
+ int ret;
+ int i;
+
+ if (eb->is_42x && (cs_index > 7)) {
+ dev_err(eb->dev,
+ "invalid chipselect %u, we only support 0-7\n",
+ cs_index);
+ return;
+ }
+ if (eb->is_43x && (cs_index > 3)) {
+ dev_err(eb->dev,
+ "invalid chipselect %u, we only support 0-3\n",
+ cs_index);
+ return;
+ }
+
+ /* Several chip selects can be joined into one device */
+ if (cs_size > IXP4XX_EXP_STRIDE)
+ cur_cssize = IXP4XX_EXP_STRIDE;
+ else
+ cur_cssize = cs_size;
+
+ /*
+ * The following will read/modify/write the configuration for one
+ * chipselect, attempting to leave the boot defaults in place unless
+ * something is explicitly defined.
+ */
+ regmap_read(eb->rmap, IXP4XX_EXP_TIMING_CS0 +
+ IXP4XX_EXP_TIMING_STRIDE * cs_index, &cs_cfg);
+ dev_info(eb->dev, "CS%d at %#08x, size %#08x, config before: %#08x\n",
+ cs_index, eb->bus_base + IXP4XX_EXP_STRIDE * cs_index,
+ cur_cssize, cs_cfg);
+
+	/* Size set-up: first align to 2^9 .. 2^24 */
+ cur_cssize = roundup_pow_of_two(cur_cssize);
+ if (cur_cssize < 512)
+ cur_cssize = 512;
+ cs_order = ilog2(cur_cssize);
+ if (cs_order < 9 || cs_order > 24) {
+ dev_err(eb->dev, "illegal size order %d\n", cs_order);
+ return;
+ }
+ dev_dbg(eb->dev, "CS%d size order: %d\n", cs_index, cs_order);
+ cs_cfg &= ~(IXP4XX_EXP_SIZE_MASK);
+ cs_cfg |= ((cs_order - 9) << IXP4XX_EXP_SIZE_SHIFT);
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_exp_tim_props); i++) {
+ const struct ixp4xx_exp_tim_prop *ip = &ixp4xx_exp_tim_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, ip->prop, &val);
+ if (ret)
+ continue;
+
+ /* Handle bools (single bits) first */
+ if (ip->max == 1) {
+ if (val)
+ cs_cfg |= ip->mask;
+ else
+ cs_cfg &= ~ip->mask;
+ dev_info(eb->dev, "CS%d %s %s\n", cs_index,
+ val ? "enabled" : "disabled",
+ ip->prop);
+ continue;
+ }
+
+ if (val > ip->max) {
+ dev_err(eb->dev,
+ "CS%d too high value for %s: %u, capped at %u\n",
+ cs_index, ip->prop, val, ip->max);
+ val = ip->max;
+ }
+ /* This assumes max value fills all the assigned bits (and it does) */
+ cs_cfg &= ~ip->mask;
+ cs_cfg |= (val << ip->shift);
+ dev_info(eb->dev, "CS%d set %s to %u\n", cs_index, ip->prop, val);
+ }
+
+ ret = of_property_read_u32(np, "intel,ixp4xx-eb-cycle-type", &val);
+ if (!ret) {
+ if (val > 3) {
+ dev_err(eb->dev, "illegal cycle type %d\n", val);
+ return;
+ }
+ dev_info(eb->dev, "CS%d set cycle type %d\n", cs_index, val);
+ cs_cfg &= ~IXP4XX_EXP_CYC_TYPE_MASK;
+ cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
+ }
+
+ if (eb->is_42x)
+ cs_cfg &= ~IXP42X_RESERVED;
+ if (eb->is_43x) {
+ cs_cfg &= ~IXP43X_RESERVED;
+ /*
+ * This bit for Intel strata flash is currently unused, but let's
+ * report it if we find one.
+ */
+ if (cs_cfg & IXP43X_EXP_SYNC_INTEL)
+ dev_info(eb->dev, "claims to be Intel strata flash\n");
+ }
+ cs_cfg |= IXP4XX_EXP_CS_EN;
+
+ regmap_write(eb->rmap,
+ IXP4XX_EXP_TIMING_CS0 + IXP4XX_EXP_TIMING_STRIDE * cs_index,
+ cs_cfg);
+ dev_info(eb->dev, "CS%d wrote %#08x into CS config\n", cs_index, cs_cfg);
+
+ /*
+ * If several chip selects are joined together into one big
+ * device area, we call ourselves recursively for each successive
+ * chip select. For a 32MB flash chip this results in two calls
+ * for example.
+ */
+ if (cs_size > IXP4XX_EXP_STRIDE)
+ ixp4xx_exp_setup_chipselect(eb, np,
+ cs_index + 1,
+ cs_size - IXP4XX_EXP_STRIDE);
+}
+
+static void ixp4xx_exp_setup_child(struct ixp4xx_eb *eb,
+ struct device_node *np)
+{
+ u32 cs_sizes[IXP4XX_EXP_NUM_CS];
+ int num_regs;
+ u32 csindex;
+ u32 cssize;
+ int ret;
+ int i;
+
+ num_regs = of_property_count_elems_of_size(np, "reg", IXP4XX_OF_REG_SIZE);
+ if (num_regs <= 0)
+ return;
+ dev_dbg(eb->dev, "child %s has %d register sets\n",
+ of_node_full_name(np), num_regs);
+
+ for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++)
+ cs_sizes[csindex] = 0;
+
+ for (i = 0; i < num_regs; i++) {
+ u32 rbase, rsize;
+
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE, &csindex);
+ if (ret)
+ break;
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE + 1, &rbase);
+ if (ret)
+ break;
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE + 2, &rsize);
+ if (ret)
+ break;
+
+ if (csindex >= IXP4XX_EXP_NUM_CS) {
+ dev_err(eb->dev, "illegal CS %d\n", csindex);
+ continue;
+ }
+ /*
+		 * The memory window always starts at the CS base, so we need to
+		 * add the start and size to get the total size from the start of
+		 * the CS base. For example, if CS0 is at 0x50000000 and the reg is
+		 * <0 0xe40000 0x40000>, the size is 0xe80000.
+ *
+ * Roof this if we have several regs setting the same CS.
+ */
+ cssize = rbase + rsize;
+ dev_dbg(eb->dev, "CS%d size %#08x\n", csindex, cssize);
+ if (cs_sizes[csindex] < cssize)
+ cs_sizes[csindex] = cssize;
+ }
+
+ for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++) {
+ cssize = cs_sizes[csindex];
+ if (!cssize)
+ continue;
+ /* Just this one, so set it up and return */
+ ixp4xx_exp_setup_chipselect(eb, np, csindex, cssize);
+ }
+}
+
+static int ixp4xx_exp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ixp4xx_eb *eb;
+ struct device_node *child;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ eb = devm_kzalloc(dev, sizeof(*eb), GFP_KERNEL);
+ if (!eb)
+ return -ENOMEM;
+
+ eb->dev = dev;
+ eb->is_42x = of_device_is_compatible(np, "intel,ixp42x-expansion-bus-controller");
+ eb->is_43x = of_device_is_compatible(np, "intel,ixp43x-expansion-bus-controller");
+
+ eb->rmap = syscon_node_to_regmap(np);
+ if (IS_ERR(eb->rmap))
+ return dev_err_probe(dev, PTR_ERR(eb->rmap), "no regmap\n");
+
+	/* Check that the regmap works, but only on the first read */
+ ret = regmap_read(eb->rmap, IXP4XX_EXP_CNFG0, &val);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot read regmap\n");
+ if (val & IXP4XX_EXP_CNFG0_MEM_MAP)
+ eb->bus_base = IXP4XX_EXP_BOOT_BASE;
+ else
+ eb->bus_base = IXP4XX_EXP_NORMAL_BASE;
+ dev_info(dev, "expansion bus at %08x\n", eb->bus_base);
+
+ if (eb->is_43x) {
+ /* Check some fuses */
+ regmap_read(eb->rmap, IXP43X_EXP_UNIT_FUSE_RESET, &val);
+ switch (FIELD_GET(IXP43x_EXP_FUSE_SPEED_MASK, val)) {
+ case 0:
+ dev_info(dev, "IXP43x at 533 MHz\n");
+ break;
+ case 1:
+ dev_info(dev, "IXP43x at 400 MHz\n");
+ break;
+ case 2:
+ dev_info(dev, "IXP43x at 667 MHz\n");
+ break;
+ default:
+ dev_info(dev, "IXP43x unknown speed\n");
+ break;
+ }
+ }
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ ixp4xx_exp_setup_child(eb, child);
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_exp_of_match[] = {
+ { .compatible = "intel,ixp42x-expansion-bus-controller", },
+ { .compatible = "intel,ixp43x-expansion-bus-controller", },
+ { .compatible = "intel,ixp45x-expansion-bus-controller", },
+ { .compatible = "intel,ixp46x-expansion-bus-controller", },
+ { }
+};
+
+static struct platform_driver ixp4xx_exp_driver = {
+ .probe = ixp4xx_exp_probe,
+ .driver = {
+ .name = "intel-extbus",
+ .of_match_table = ixp4xx_exp_of_match,
+ },
+};
+module_platform_driver(ixp4xx_exp_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
+MODULE_LICENSE("GPL");
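
Note: the chip-select maths in ixp4xx_exp_setup_child()/ixp4xx_exp_setup_chipselect() boils down to rounding rbase + rsize up to a power of two between 2^9 and 2^24 and programming the resulting order. A small sketch of that arithmetic, using the same 0xe40000/0x40000 example as the driver comment, is shown below for illustration only.

/* Sketch of the window-size arithmetic used by the driver above. */
#include <linux/log2.h>
#include <linux/types.h>

static u32 example_cs_order(u32 rbase, u32 rsize)
{
	/* 0xe40000 + 0x40000 = 0xe80000, rounded up to 0x1000000 (16 MiB) */
	u32 cssize = roundup_pow_of_two(rbase + rsize);

	if (cssize < 512)
		cssize = 512;

	return ilog2(cssize);	/* 24 for the example above */
}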
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
new file mode 100644
index 000000000000..b39a11e6c624
--- /dev/null
+++ b/drivers/bus/mhi/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2021, Linaro Ltd.
+#
+
+source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 000000000000..46981331b38f
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,5 @@
+# Host MHI stack
+obj-y += host/
+
+# Endpoint MHI stack
+obj-y += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
new file mode 100644
index 000000000000..f794b9c8049e
--- /dev/null
+++ b/drivers/bus/mhi/common.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_COMMON_H
+#define _MHI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <linux/mhi.h>
+
+/* MHI registers */
+#define MHIREGLEN 0x00
+#define MHIVER 0x08
+#define MHICFG 0x10
+#define CHDBOFF 0x18
+#define ERDBOFF 0x20
+#define BHIOFF 0x28
+#define BHIEOFF 0x2c
+#define DEBUGOFF 0x30
+#define MHICTRL 0x38
+#define MHISTATUS 0x48
+#define CCABAP_LOWER 0x58
+#define CCABAP_HIGHER 0x5c
+#define ECABAP_LOWER 0x60
+#define ECABAP_HIGHER 0x64
+#define CRCBAP_LOWER 0x68
+#define CRCBAP_HIGHER 0x6c
+#define CRDB_LOWER 0x70
+#define CRDB_HIGHER 0x74
+#define MHICTRLBASE_LOWER 0x80
+#define MHICTRLBASE_HIGHER 0x84
+#define MHICTRLLIMIT_LOWER 0x88
+#define MHICTRLLIMIT_HIGHER 0x8c
+#define MHIDATABASE_LOWER 0x98
+#define MHIDATABASE_HIGHER 0x9c
+#define MHIDATALIMIT_LOWER 0xa0
+#define MHIDATALIMIT_HIGHER 0xa4
+
+/* MHI BHI registers */
+#define BHI_BHIVERSION_MINOR 0x00
+#define BHI_BHIVERSION_MAJOR 0x04
+#define BHI_IMGADDR_LOW 0x08
+#define BHI_IMGADDR_HIGH 0x0c
+#define BHI_IMGSIZE 0x10
+#define BHI_RSVD1 0x14
+#define BHI_IMGTXDB 0x18
+#define BHI_RSVD2 0x1c
+#define BHI_INTVEC 0x20
+#define BHI_RSVD3 0x24
+#define BHI_EXECENV 0x28
+#define BHI_STATUS 0x2c
+#define BHI_ERRCODE 0x30
+#define BHI_ERRDBG1 0x34
+#define BHI_ERRDBG2 0x38
+#define BHI_ERRDBG3 0x3c
+#define BHI_SERIALNU 0x40
+#define BHI_SBLANTIROLLVER 0x44
+#define BHI_NUMSEG 0x48
+#define BHI_MSMHWID(n) (0x4c + (0x4 * (n)))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
+#define BHI_RSVD5 0xc4
+
+/* BHI register bits */
+#define BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHI_TXDB_SEQNUM_SHFT 0
+#define BHI_STATUS_MASK GENMASK(31, 30)
+#define BHI_STATUS_ERROR 0x03
+#define BHI_STATUS_SUCCESS 0x02
+#define BHI_STATUS_RESET 0x00
+
+/* MHI BHIE registers */
+#define BHIE_MSMSOCID_OFFS 0x00
+#define BHIE_TXVECADDR_LOW_OFFS 0x2c
+#define BHIE_TXVECADDR_HIGH_OFFS 0x30
+#define BHIE_TXVECSIZE_OFFS 0x34
+#define BHIE_TXVECDB_OFFS 0x3c
+#define BHIE_TXVECSTATUS_OFFS 0x44
+#define BHIE_RXVECADDR_LOW_OFFS 0x60
+#define BHIE_RXVECADDR_HIGH_OFFS 0x64
+#define BHIE_RXVECSIZE_OFFS 0x68
+#define BHIE_RXVECDB_OFFS 0x70
+#define BHIE_RXVECSTATUS_OFFS 0x78
+
+/* BHIE register bits */
+#define BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECDB_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_TXVECSTATUS_STATUS_SHFT 30
+#define BHIE_TXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_TXVECSTATUS_STATUS_ERROR 0x03
+#define BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECDB_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_RXVECSTATUS_STATUS_SHFT 30
+#define BHIE_RXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_RXVECSTATUS_STATUS_ERROR 0x03
+
+/* MHI register bits */
+#define MHICFG_NHWER_MASK GENMASK(31, 24)
+#define MHICFG_NER_MASK GENMASK(23, 16)
+#define MHICFG_NHWCH_MASK GENMASK(15, 8)
+#define MHICFG_NCH_MASK GENMASK(7, 0)
+#define MHICTRL_MHISTATE_MASK GENMASK(15, 8)
+#define MHICTRL_RESET_MASK BIT(1)
+#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8)
+#define MHISTATUS_SYSERR_MASK BIT(2)
+#define MHISTATUS_READY_MASK BIT(0)
+
+/* Command Ring Element macros */
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR 0
+#define MHI_TRE_CMD_NOOP_DWORD0 0
+#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), MHI_CMD_NOP))
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR 0
+#define MHI_TRE_CMD_RESET_DWORD0 0
+#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_RESET_CHAN))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR 0
+#define MHI_TRE_CMD_STOP_DWORD0 0
+#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_STOP_CHAN))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR 0
+#define MHI_TRE_CMD_START_DWORD0 0
+#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_START_CHAN))
+
+#define MHI_TRE_GET_DWORD(tre, word) le32_to_cpu((tre)->dword[(word)])
+#define MHI_TRE_GET_CMD_CHID(tre) FIELD_GET(GENMASK(31, 24), MHI_TRE_GET_DWORD(tre, 1))
+#define MHI_TRE_GET_CMD_TYPE(tre) FIELD_GET(GENMASK(23, 16), MHI_TRE_GET_DWORD(tre, 1))
+
+/* Event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_TRE_EV_DWORD0(code, len) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code) | \
+ FIELD_PREP(GENMASK(15, 0), len))
+#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), type))
+#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_LEN(tre) FIELD_GET(GENMASK(15, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_CHID(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_TYPE(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_STATE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_EXECENV(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0)
+#define MHI_TRE_GET_EV_TIME(tre) MHI_TRE_GET_EV_PTR(tre)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre))
+#define MHI_TRE_GET_EV_VEID(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+
+/* State change event */
+#define MHI_SC_EV_PTR 0
+#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
+#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* EE event */
+#define MHI_EE_EV_PTR 0
+#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
+#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
+#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
+#define MHI_TRE_TYPE_TRANSFER 2
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
+ MHI_TRE_TYPE_TRANSFER) | \
+ FIELD_PREP(BIT(10), bei) | \
+ FIELD_PREP(BIT(9), ieot) | \
+ FIELD_PREP(BIT(8), ieob) | \
+ FIELD_PREP(BIT(0), chain))
+#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
+#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) cpu_to_le32(cookie)
+#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
+ MHI_PKT_TYPE_COALESCING))
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+/* Channel state */
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_ENABLED,
+ MHI_CH_STATE_RUNNING,
+ MHI_CH_STATE_SUSPENDED,
+ MHI_CH_STATE_STOP,
+ MHI_CH_STATE_ERROR,
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+struct mhi_event_ctxt {
+ __le32 intmod;
+ __le32 ertype;
+ __le32 msivec;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __le32 chcfg;
+ __le32 chtype;
+ __le32 erindex;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+struct mhi_ring_element {
+ __le64 ptr;
+ __le32 dword[2];
+};
+
+static inline const char *mhi_state_str(enum mhi_state state)
+{
+ switch (state) {
+ case MHI_STATE_RESET:
+ return "RESET";
+ case MHI_STATE_READY:
+ return "READY";
+ case MHI_STATE_M0:
+ return "M0";
+ case MHI_STATE_M1:
+ return "M1";
+ case MHI_STATE_M2:
+ return "M2";
+ case MHI_STATE_M3:
+ return "M3";
+ case MHI_STATE_M3_FAST:
+ return "M3 FAST";
+ case MHI_STATE_BHI:
+ return "BHI";
+ case MHI_STATE_SYS_ERR:
+ return "SYS ERROR";
+ default:
+ return "Unknown state";
+ }
+};
+
+#endif /* _MHI_COMMON_H */
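
Note: the TRE and event macros above are FIELD_PREP()/FIELD_GET() pairs over little-endian dwords, so a value packed by one macro can be read back with its getter. The sketch below, purely illustrative, shows that round trip for a transfer completion event; it assumes this header and <linux/bug.h> are included.

/* Sketch: build a TX completion event and decode it with the getters. */
#include <linux/bug.h>
#include "common.h"

static void example_tre_roundtrip(void)
{
	struct mhi_ring_element ev = {
		.ptr	  = MHI_TRE_EV_PTR(0x1000),
		.dword[0] = MHI_TRE_EV_DWORD0(MHI_EV_CC_EOT, 64),
		.dword[1] = MHI_TRE_EV_DWORD1(2, MHI_PKT_TYPE_TX_EVENT),
	};

	/* Each getter undoes the matching FIELD_PREP() above. */
	WARN_ON(MHI_TRE_GET_EV_CODE(&ev) != MHI_EV_CC_EOT);
	WARN_ON(MHI_TRE_GET_EV_LEN(&ev) != 64);
	WARN_ON(MHI_TRE_GET_EV_CHID(&ev) != 2);
	WARN_ON(MHI_TRE_GET_EV_TYPE(&ev) != MHI_PKT_TYPE_TX_EVENT);
}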
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 000000000000..90ab3b040672
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by a host processor to control
+	  and communicate with a modem device over a high speed peripheral
+ bus or shared memory.
+
+	  MHI_BUS_EP implements the MHI protocol for endpoint devices,
+	  such as an SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..aad85f180b70
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..a2125fa5fe2f
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
+#define MHI_REG_OFFSET 0x100
+#define BHI_REG_OFFSET 0x200
+
+/* MHI registers */
+#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN)
+#define EP_MHIVER (MHI_REG_OFFSET + MHIVER)
+#define EP_MHICFG (MHI_REG_OFFSET + MHICFG)
+#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF)
+#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF)
+#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF)
+#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF)
+#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF)
+#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL)
+#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS)
+#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER)
+#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER)
+#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER)
+#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER)
+#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER)
+#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER)
+#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER)
+#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER)
+#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER)
+#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER)
+#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER)
+#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER)
+#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER)
+#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER)
+#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER)
+#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER)
+
+/* MHI BHI registers */
+#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC)
+#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV)
+
+/* MHI Doorbell registers */
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+
+#define MHI_CTRL_INT_STATUS 0x4
+#define MHI_CTRL_INT_STATUS_MSK BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1)
+#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+
+/*
+ * Unlike the usual "masking" convention, writing "1" to a bit in this register
+ * enables the interrupt and writing "0" will disable it.
+ */
+#define MHI_CTRL_INT_MASK 0x94
+#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_CRDB_MASK BIT(1)
+
+#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+
+#define NR_OF_CMD_RINGS 1
+#define MHI_MASK_ROWS_CH_DB 4
+#define MHI_MASK_ROWS_EV_DB 4
+#define MHI_MASK_CH_LEN 32
+#define MHI_MASK_EV_LEN 32
+
+/* Generic context */
+struct mhi_generic_ctx {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+};
+
+/* Ring element */
+union mhi_ep_ring_ctx {
+ struct mhi_cmd_ctxt cmd;
+ struct mhi_event_ctxt ev;
+ struct mhi_chan_ctxt ch;
+ struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring_item {
+ struct list_head node;
+ struct mhi_ep_ring *ring;
+};
+
+struct mhi_ep_ring {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ union mhi_ep_ring_ctx *ring_ctx;
+ struct mhi_ring_element *ring_cache;
+ enum mhi_ep_ring_type type;
+ u64 rbase;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ u32 ch_id;
+ u32 er_index;
+ u32 irq_vector;
+ bool started;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+ struct list_head node;
+ enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_ring ring;
+ struct mutex lock;
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+ u32 chan;
+ bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/* MMIO related functions */
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val);
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 000000000000..1dc8a3557a46
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Endpoint bus stack
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define M0_WAIT_DELAY_MS 100
+#define M0_WAIT_COUNT 100
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
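+/*
+ * Add an event to the given event ring and raise an IRQ to the host, unless
+ * BEI (Block Event Interrupt) is requested by the caller.
+ */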
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ struct mhi_ring_element *el, bool bei)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_ctx *ctx;
+ struct mhi_ep_ring *ring;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ ring = &mhi_cntrl->mhi_event[ring_idx].ring;
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
+ if (!ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+ }
+
+ /* Add element to the event ring */
+ ret = mhi_ep_ring_add_element(ring, el);
+ if (ret) {
+ dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /*
+ * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
+ * set this flag for interrupt moderation as per MHI protocol.
+ */
+ if (!bei)
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+{
+ struct mhi_ring_element event = {};
+
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_SC_EV_DWORD0(state);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+{
+ struct mhi_ring_element event = {};
+
+ event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct mhi_ring_element event = {};
+
+ event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event.dword[0] = MHI_CC_EV_DWORD0(code);
+ event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+}
+
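+/* Process a single command ring element (channel START/STOP/RESET commands) */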
+static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_ring *ch_ring;
+ u32 tmp, ch_id;
+ int ret;
+
+ ch_id = MHI_TRE_GET_CMD_CHID(el);
+ mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+ switch (MHI_TRE_GET_CMD_TYPE(el)) {
+ case MHI_PKT_TYPE_START_CHAN_CMD:
+ dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
+
+ mutex_lock(&mhi_chan->lock);
+ /* Initialize and configure the corresponding channel ring */
+ if (!ch_ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_EV_CC_UNDEFINED_ERR);
+ if (ret)
+ dev_err(dev, "Error sending completion event: %d\n", ret);
+
+ goto err_unlock;
+ }
+ }
+
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ /*
+		 * Create MHI device only during UL channel start. Since the MHI
+		 * channels operate in pairs, we'll associate both UL and DL
+		 * channels with the same device.
+		 *
+		 * We also need to check for mhi_dev != NULL because the host
+		 * will issue the START_CHAN command during resume and we don't
+		 * destroy the device during suspend.
+ */
+ if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+ ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+ }
+
+ /* Finally, enable DB for the channel */
+ mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
+
+ break;
+ case MHI_PKT_TYPE_STOP_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Disable DB for the channel */
+ mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+
+ /* Send channel disconnect status to client drivers */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to STOP */
+ mhi_chan->state = MHI_CH_STATE_STOP;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ case MHI_PKT_TYPE_RESET_CHAN_CMD:
+		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Stop and reset the transfer ring */
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+
+ /* Send channel disconnect status to client driver */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ default:
+ dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
+ MHI_TRE_GET_CMD_TYPE(el), ch_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ return !!(ring->rd_offset == ring->wr_offset);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
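+/*
+ * Read the data described by the TREs of a TD from the host memory into the
+ * buffer provided in @result, sending completion events where required.
+ */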
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t tr_len, read_offset, write_offset;
+ struct mhi_ring_element *el;
+ bool tr_done = false;
+ void *write_addr;
+ u64 read_addr;
+ u32 buf_left;
+ int ret;
+
+ buf_left = len;
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ return -ENODEV;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ /* Check if there is data pending to be read from previous read operation */
+ if (mhi_chan->tre_bytes_left) {
+ dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+ tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ } else {
+ mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+ tr_len = min(buf_left, mhi_chan->tre_size);
+ }
+
+ read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ write_offset = len - buf_left;
+ read_addr = mhi_chan->tre_loc + read_offset;
+ write_addr = result->buf_addr + write_offset;
+
+ dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+ return ret;
+ }
+
+ buf_left -= tr_len;
+ mhi_chan->tre_bytes_left -= tr_len;
+
+ /*
+ * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+ * read completely:
+ *
+ * 1. Send completion event to the host based on the flags set in TRE.
+ * 2. Increment the local read offset of the transfer ring.
+ */
+ if (!mhi_chan->tre_bytes_left) {
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+
+ tr_done = true;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ }
+
+ result->bytes_xferd += tr_len;
+ } while (buf_left && !tr_done);
+
+ return 0;
+}
+
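+/*
+ * Process a channel ring element. For DL channels, the client driver callback
+ * is invoked directly. For UL channels, the data is first read from the host
+ * and then passed to the client driver.
+ */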
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ /*
+ * Bail out if transfer callback is not registered for the channel.
+ * This is most likely due to the client driver not loaded at this point.
+ */
+ if (!mhi_chan->xfer_cb) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+ return -ENODEV;
+ }
+
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ result.buf_addr = kzalloc(len, GFP_KERNEL);
+ if (!result.buf_addr)
+ return -ENOMEM;
+
+ do {
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ kfree(result.buf_addr);
+ return ret;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ result.bytes_xferd = 0;
+ memset(result.buf_addr, 0, len);
+
+ /* Read until the ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+
+ kfree(result.buf_addr);
+ }
+
+ return 0;
+}
+
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ u32 buf_left, read_offset;
+ struct mhi_ep_ring *ring;
+ enum mhi_ev_ccs code;
+ void *read_addr;
+ u64 write_addr;
+ size_t tr_len;
+ u32 tre_len;
+ int ret;
+
+ buf_left = skb->len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+ dev_err(dev, "TRE not available!\n");
+ ret = -ENOSPC;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_TRE_DATA_GET_LEN(el);
+
+ tr_len = min(buf_left, tre_len);
+ read_offset = skb->len - buf_left;
+ read_addr = skb->data + read_offset;
+ write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
+ goto err_exit;
+ }
+
+ buf_left -= tr_len;
+ /*
+		 * For all TREs queued by the host for the DL channel, only the EOT flag will be
+		 * set. If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+		 * the host so that it can continue the packet in the next TREs. Else, send the
+		 * EOT event to the host indicating the packet boundary.
+ */
+ if (buf_left)
+ code = MHI_EV_CC_OVERFLOW;
+ else
+ code = MHI_EV_CC_EOT;
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (buf_left);
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
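+/*
+ * Map and cache the channel, event and command contexts programmed by the
+ * host, then start the command ring.
+ */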
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host channel context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
+ &mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ch_ctx_cache,
+ ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
+ return ret;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host event context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
+ &mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ev_ctx_cache,
+ ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host command context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
+ &mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
+ cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
+ goto err_ev_ctx;
+ }
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the command ring\n");
+ goto err_cmd_ctx;
+ }
+
+ return ret;
+
+err_cmd_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+err_ev_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+err_ch_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+
+ return ret;
+}
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ /*
+ * Doorbell interrupts are enabled when the corresponding channel gets started.
+ * Enabling all interrupts here triggers spurious irqs as some of the interrupts
+ * associated with hw channels always get triggered.
+ */
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
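+/* Wait for the host to enter M0, cache the host config and enable interrupts */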
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ bool mhi_reset;
+ u32 count = 0;
+ int ret;
+
+ /* Wait for Host to set the M0 state */
+ do {
+ msleep(M0_WAIT_DELAY_MS);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ /* Clear the MHI reset if host is in reset state */
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_info(dev, "Detected Host reset while waiting for M0\n");
+ }
+ count++;
+ } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
+
+ if (state != MHI_STATE_M0) {
+ dev_err(dev, "Host failed to enter M0\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache host config\n");
+ return ret;
+ }
+
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Enable all interrupts now */
+ mhi_ep_enable_int(mhi_cntrl);
+
+ return 0;
+}
+
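+/* Worker for processing the command ring elements queued by the host */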
+static void mhi_ep_cmd_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ int ret;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ return;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset)
+ return;
+
+ /*
+	 * Process command ring elements until the write offset. In case of an error, just
+	 * try to process the next element.
+ */
+ while (ring->rd_offset != ring->wr_offset) {
+ el = &ring->ring_cache[ring->rd_offset];
+
+ ret = mhi_ep_process_cmd_ring(ring, el);
+ if (ret)
+ dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+
+ mhi_ep_ring_inc_index(ring);
+ }
+}
+
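+/* Worker for processing the channel rings for which doorbells were received */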
+static void mhi_ep_ch_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_item *itr, *tmp;
+ struct mhi_ring_element *el;
+ struct mhi_ep_ring *ring;
+ struct mhi_ep_chan *chan;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+	/* Process each queued channel ring. In case of an error, just process the next element. */
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ ring = itr->ring;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ kfree(itr);
+ continue;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset) {
+ kfree(itr);
+ continue;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ mutex_lock(&chan->lock);
+ dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ ret = mhi_ep_process_ch_ring(ring, el);
+ if (ret) {
+ dev_err(dev, "Error processing ring for channel (%u): %d\n",
+ ring->ch_id, ret);
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ }
+}
+
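+/* Worker for handling the MHI state transitions requested by the host */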
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_state_transition *itr, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling MHI state transition to %s\n",
+ mhi_state_str(itr->state));
+
+ switch (itr->state) {
+ case MHI_STATE_M0:
+ ret = mhi_ep_set_m0_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M0 state\n");
+ break;
+ case MHI_STATE_M3:
+ ret = mhi_ep_set_m3_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M3 state\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+ break;
+ }
+ kfree(itr);
+ }
+}
+
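+/*
+ * Queue the rings of the channels whose doorbell bits are set in @ch_int and
+ * schedule the channel ring worker.
+ */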
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+ u32 ch_idx)
+{
+ struct mhi_ep_ring_item *item;
+ struct mhi_ep_ring *ring;
+ bool work = !!ch_int;
+ LIST_HEAD(head);
+ u32 i;
+
+ /* First add the ring items to a local list */
+ for_each_set_bit(i, &ch_int, 32) {
+ /* Channel index varies for each register: 0, 32, 64, 96 */
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->ring = ring;
+ list_add_tail(&item->node, &head);
+ }
+
+ /* Now, splice the local list into ch_db_list and queue the work item */
+ if (work) {
+ spin_lock(&mhi_cntrl->list_lock);
+ list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
+ }
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then
+ * check for the bits that are set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ch_int, ch_idx, i;
+
+ /* Bail out if there is no channel doorbell interrupt */
+ if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+ return;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+ if (ch_int) {
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+ struct mhi_ep_state_transition *item;
+
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value;
+ bool mhi_reset;
+
+ /* Acknowledge the ctrl interrupt */
+ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ dev_info(dev, "Host triggered MHI reset!\n");
+ disable_irq_nosync(mhi_cntrl->irq);
+ schedule_work(&mhi_cntrl->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
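+/* Stop all channels, destroy the channel devices and reset all rings */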
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_ring *ch_ring, *ev_ring;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int i;
+
+ /* Stop all the channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ flush_workqueue(mhi_cntrl->wq);
+
+ /* Destroy devices associated with all channels */
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+ /* Stop and reset the transfer rings */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ mutex_lock(&mhi_chan->lock);
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Stop and reset the event rings */
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ ev_ring = &mhi_cntrl->mhi_event[i].ring;
+ if (!ev_ring->started)
+ continue;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ mhi_ep_ring_reset(mhi_cntrl, ev_ring);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ }
+
+ /* Stop and reset the command ring */
+ mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+ mhi_ep_free_host_cfg(mhi_cntrl);
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_cntrl->enabled = false;
+}
+
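+/* Worker for handling the MHI reset requested by the host */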
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state cur_state;
+ int ret;
+
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will
+ * issue reset during shutdown also and we don't need to do re-init in
+ * that case.
+ */
+ if (cur_state == MHI_STATE_SYS_ERR) {
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ return;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+ return;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ }
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ return;
+
+ /* Signal host that the device went to SYS_ERR state */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ /*
+ * Mask all interrupts until the state machine is ready. Interrupts will
+ * be enabled later with mhi_ep_enable().
+ */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+ mhi_cntrl->mhi_state = MHI_STATE_RESET;
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ goto err_free_event;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint\n");
+ goto err_free_event;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ mhi_cntrl->enabled = true;
+
+ return 0;
+
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ if (mhi_cntrl->enabled)
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ mhi_dev->mhi_cntrl->mhi_dev = NULL;
+
+ /*
+	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
+	 * devices for the channels will only get created in mhi_ep_create_device()
+	 * if the mhi_dev associated with the channel is NULL.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_device_type dev_type)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ /* Controller device is always allocated first */
+ if (dev_type == MHI_DEVICE_CONTROLLER)
+ /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ else
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_type = dev_type;
+
+ return mhi_dev;
+}
+
+/*
+ * MHI channels are always defined in pairs with UL as the even numbered
+ * channel and DL as the odd numbered one. This function receives the UL
+ * channel (primary) as ch_id and always looks at the next entry in the
+ * channel list for the corresponding DL channel (secondary).
+ */
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+	/* Check if the channel name is the same for both UL and DL */
+	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
+		dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
+			mhi_chan->name, mhi_chan[1].name);
+ return -EINVAL;
+ }
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ /* Configure primary channel */
+ mhi_dev->ul_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ mhi_dev->dl_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+	/* Channel name is the same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ ret = dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+ if (ret) {
+ put_device(&mhi_dev->dev);
+ return ret;
+ }
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mhi_ep_chan *ul_chan, *dl_chan;
+
+ if (dev->bus != &mhi_ep_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_ep_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy devices created for channels */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ put_device(&ul_chan->mhi_dev->dev);
+
+ if (dl_chan)
+ put_device(&dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+ int ret = -EINVAL;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * Allocate max_channels supported by the MHI endpoint and populate
+ * only the defined channels
+ */
+ mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
+ chan, mhi_cntrl->max_chan);
+ goto error_chan_cfg;
+ }
+
+ /* Bi-directional and direction less channels are not supported */
+ if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
+ dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
+ ch_cfg->dir, chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = mhi_ep_chan_init(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+ INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+ INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
+
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ if (!mhi_cntrl->wq) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+ spin_lock_init(&mhi_cntrl->state_lock);
+ spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Set controller index */
+ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_destroy_wq;
+
+ mhi_cntrl->index = ret;
+
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+ goto err_ida_free;
+ }
+
+ /* Allocate the controller device */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_free_irq;
+ }
+
+ ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ if (ret)
+ goto err_put_dev;
+
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_put_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_put_dev:
+ put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ destroy_workqueue(mhi_cntrl->wq);
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Disconnect the channels associated with the driver */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to the client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mhi_chan->xfer_cb = NULL;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Remove the client driver now */
+ mhi_drv->remove(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ /* Client drivers should have callbacks defined for both channels */
+ if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 000000000000..b5bfd22f2c8e
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
+{
+ return readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, offset);
+ regval &= ~mask;
+ regval |= (val << __ffs(mask)) & mask;
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(dev, offset);
+ regval &= mask;
+ regval >>= __ffs(mask);
+
+ return regval;
+}
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
+static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
+{
+ u32 chid_mask, chid_shift, chdb_idx, val;
+
+ chid_shift = ch_id % 32;
+ chid_mask = BIT(chid_shift);
+ chdb_idx = ch_id / 32;
+
+ val = enable ? 1 : 0;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
+
+ /* Update the local copy of the channel mask */
+ mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
+ mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
+}
+
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ bool chdb = false;
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
+ if (mhi_cntrl->chdb[i].status)
+ chdb = true;
+ }
+
+ /* Return whether a channel doorbell interrupt occurred or not */
+ return chdb;
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
+ MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
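+/* Read the 64-bit channel context array base address programmed by the host */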
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
+ mhi_cntrl->ch_ctx_host_pa = regval;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
+ mhi_cntrl->ch_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
+ mhi_cntrl->ev_ctx_host_pa = regval;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
+ mhi_cntrl->ev_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
+ mhi_cntrl->cmd_ctx_host_pa = regval;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
+ mhi_cntrl->cmd_ctx_host_pa |= regval;
+}
+
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u64 db_offset;
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
+ db_offset = regval;
+ db_offset <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
+ db_offset |= regval;
+
+ return db_offset;
+}
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
+ mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+}
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 000000000000..115518ec76a4
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+ return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ __le64 rlen;
+
+ memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
+
+ return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+ ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
+}
+
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t start, copy_size;
+ int ret;
+
+ /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
+ if (ring->type == RING_TYPE_ER)
+ return 0;
+
+ /* No need to cache the ring if write pointer is unmodified */
+ if (ring->wr_offset == end)
+ return 0;
+
+ start = ring->wr_offset;
+ if (start < end) {
+ copy_size = (end - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+ } else {
+ copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+ (start * sizeof(struct mhi_ring_element)),
+ &ring->ring_cache[start], copy_size);
+ if (ret < 0)
+ return ret;
+
+ if (end) {
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
+ &ring->ring_cache[0],
+ end * sizeof(struct mhi_ring_element));
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+
+ return 0;
+}
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+ size_t wr_offset;
+ int ret;
+
+ wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+	/* Cache the host ring up to the write offset */
+ ret = __mhi_ep_cache_ring(ring, wr_offset);
+ if (ret)
+ return ret;
+
+ ring->wr_offset = wr_offset;
+
+ return 0;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ u64 wr_ptr;
+
+ wr_ptr = mhi_ep_mmio_get_db(ring);
+
+ return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ __le64 rp;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+ else
+ num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+ /* Check if there is space in ring for adding at least an element */
+ if (!num_free_elem) {
+ dev_err(dev, "No space left in the ring\n");
+ return -ENOSPC;
+ }
+
+ old_offset = ring->rd_offset;
+ mhi_ep_ring_inc_index(ring);
+
+ dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+ /* Update rp in ring context */
+ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
+ sizeof(*el));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->db_offset_h = EP_CRDB_HIGHER;
+ ring->db_offset_l = EP_CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
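+/* Start a ring by caching its host ring context and contents locally */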
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ __le64 val;
+ int ret;
+
+ ring->mhi_cntrl = mhi_cntrl;
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
+ ring->rbase = le64_to_cpu(val);
+
+ if (ring->type == RING_TYPE_CH)
+ ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
+
+ if (ring->type == RING_TYPE_ER)
+ ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+
+ /* During ring init, both rp and wp are equal */
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
+ ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+ ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+
+ /* Allocate ring cache memory for holding the copy of host ring */
+ ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
+ ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
+ if (ret) {
+ dev_err(dev, "Failed to cache ring\n");
+ kfree(ring->ring_cache);
+ return ret;
+ }
+
+ ring->started = true;
+
+ return 0;
+}
+
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+ ring->started = false;
+ kfree(ring->ring_cache);
+ ring->ring_cache = NULL;
+}
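The caching helper above mirrors the host ring into ring_cache between the previously cached write offset and the new doorbell position, issuing two read_from_host() calls when the region wraps past the end of the ring. A minimal stand-alone sketch of that wrap-around split, using memcpy() in place of the controller's read_from_host() callback (the function and parameter names here are illustrative, not part of the driver):

#include <stddef.h>
#include <string.h>

/* Illustrative only: copy elements [start, end) of a circular host ring
 * into a local cache, splitting the copy when the region wraps around. */
static void cache_ring_range(void *cache, const void *host_ring,
			     size_t start, size_t end,
			     size_t ring_size, size_t el_size)
{
	if (start < end) {
		memcpy((char *)cache + start * el_size,
		       (const char *)host_ring + start * el_size,
		       (end - start) * el_size);
	} else {
		/* Copy from 'start' up to the end of the ring... */
		memcpy((char *)cache + start * el_size,
		       (const char *)host_ring + start * el_size,
		       (ring_size - start) * el_size);
		/* ...then wrap and copy from the ring base up to 'end' */
		if (end)
			memcpy(cache, host_ring, end * el_size);
	}
}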
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 000000000000..3655c19e23c7
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state)
+{
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ return true; /* Allowed in any state */
+
+ if (mhi_state == MHI_STATE_READY)
+ return cur_mhi_state == MHI_STATE_RESET;
+
+ if (mhi_state == MHI_STATE_M0)
+ return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+ if (mhi_state == MHI_STATE_M3)
+ return cur_mhi_state == MHI_STATE_M0;
+
+ return false;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+ dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+ mhi_state_str(mhi_state),
+ mhi_state_str(mhi_cntrl->mhi_state));
+ return -EACCES;
+ }
+
+ /* TODO: Add support for M1 and M2 states */
+ if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+ dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+ return -EOPNOTSUPP;
+ }
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+ mhi_cntrl->mhi_state = mhi_state;
+
+ if (mhi_state == MHI_STATE_READY)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+ return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state old_state;
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event\n");
+ return ret;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+ /* Send AMSS EE event to host */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+
+ mhi_ep_suspend_channels(mhi_cntrl);
+
+ /* Signal host that the device moved to M3 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state mhi_state;
+ int ret, is_ready;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return -EIO;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ if (ret)
+ mhi_ep_handle_syserr(mhi_cntrl);
+
+ return ret;
+}
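Taken together, the checks in mhi_ep_check_mhi_state() form a small transition matrix: SYS_ERR is reachable from any state, READY only from RESET, M0 from READY or M3, and M3 only from M0 (M1/M2 are rejected later in mhi_ep_set_mhi_state()). An equivalent stand-alone restatement, illustrative only and assuming the enum mhi_state values from <linux/mhi.h>:

#include <linux/mhi.h>
#include <linux/types.h>

/* Illustrative restatement of the endpoint state-transition rules above */
static bool ep_transition_allowed(enum mhi_state cur, enum mhi_state next)
{
	switch (next) {
	case MHI_STATE_SYS_ERR:
		return true;				/* allowed from any state */
	case MHI_STATE_READY:
		return cur == MHI_STATE_RESET;
	case MHI_STATE_M0:
		return cur == MHI_STATE_READY || cur == MHI_STATE_M3;
	case MHI_STATE_M3:
		return cur == MHI_STATE_M0;
	default:
		return false;				/* M1/M2 not supported yet */
	}
}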
diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig
new file mode 100644
index 000000000000..da5cd0c9fc62
--- /dev/null
+++ b/drivers/bus/mhi/host/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+	  Bus driver for the MHI protocol. Modem Host Interface (MHI) is a
+	  communication protocol used by host processors to control
+	  and communicate with modem devices over a high-speed peripheral
+	  bus or shared memory.
+
+config MHI_BUS_DEBUG
+ bool "Debugfs support for the MHI bus"
+ depends on MHI_BUS && DEBUG_FS
+ help
+ Enable debugfs support for use with the MHI transport. Allows
+ reading and/or modifying some values within the MHI controller
+ for debug and test purposes.
+
+config MHI_BUS_PCI_GENERIC
+ tristate "MHI PCI controller driver"
+ depends on MHI_BUS
+ depends on PCI
+ help
+	  This driver provides an MHI PCI controller driver for devices such
+	  as Qualcomm SDX55 based PCIe modems.
+
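For reference, a kernel configuration fragment that builds the host stack as modules with debugfs support might look like the following (assuming DEBUG_FS is already enabled elsewhere in the config):

CONFIG_MHI_BUS=m
CONFIG_MHI_BUS_DEBUG=y
CONFIG_MHI_BUS_PCI_GENERIC=m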
diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile
new file mode 100644
index 000000000000..859c2f38451c
--- /dev/null
+++ b/drivers/bus/mhi/host/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MHI_BUS) += mhi.o
+mhi-y := init.o main.o pm.o boot.o
+mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o
+
+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
+mhi_pci_generic-y += pci_generic.o
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
new file mode 100644
index 000000000000..26d0eddb1477
--- /dev/null
+++ b/drivers/bus/mhi/host/boot.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ if (ret) {
+ dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+
+ return 0;
+}
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/*
+	 * This should only be executing during a kernel panic; we expect all
+	 * other cores to shut down while we're collecting the RDDM buffer.
+	 * After returning from this function, we expect the device to reset.
+	 *
+	 * Normally, we read/write pm_state only after grabbing the
+	 * pm_lock, but since we're in a panic, skip it. Also, there is no
+	 * guarantee that this state change will take effect since
+	 * we're setting it without grabbing the pm_lock.
+	 */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* The update should take effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure device is not already in RDDM. In case the device asserts
+ * and a kernel panic follows, device will already be in RDDM.
+ * Do not trigger SYS ERR again and proceed with waiting for
+ * image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_MAX)
+ goto error_exit_rddm;
+
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Hardware reset so force device to enter RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+error_exit_rddm:
+ dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+
+static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status, sequence_id;
+ int ret;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
+ sequence_id);
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
+ read_unlock_bh(pm_lock);
+
+ if (ret)
+ return ret;
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
+ return -EIO;
+
+ return (!ret) ? -ETIMEDOUT : 0;
+}
+
+static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val, session_id;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
+ session_id);
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ dev_err(dev, "Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (!ret) ? -ETIMEDOUT : 0;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* Vector table is the last entry */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ vec_size, &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const struct firmware *firmware,
+ struct image_info *img_info)
+{
+ size_t remainder = firmware->size;
+ size_t to_cpy;
+ const u8 *buf = firmware->data;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ buf += to_cpy;
+ remainder -= to_cpy;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+{
+ const struct firmware *firmware = NULL;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ const char *fw_name;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int i, ret;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device MHI is not in valid state\n");
+ return;
+ }
+
+ /* save hardware info from BHI */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
+ &mhi_cntrl->serial_number);
+ if (ret)
+ dev_err(dev, "Could not capture serial number via BHI\n");
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
+ &mhi_cntrl->oem_pk_hash[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
+ break;
+ }
+ }
+
+ /* wait for ready on pass through or any other execution environment */
+ if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
+ goto fw_load_ready_state;
+
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ dev_err(dev,
+ "No firmware image defined or !sbl_size || !seg_len\n");
+ goto error_fw_load;
+ }
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Error loading firmware: %d\n", ret);
+ goto error_fw_load;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* SBL size provided is maximum size, not necessarily the image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Download image using BHI */
+ memcpy(buf, firmware->data, size);
+ ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+
+ /* Error or in EDL mode, we're done */
+ if (ret) {
+ dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Wait for ready since EDL image was loaded */
+ if (fw_name == mhi_cntrl->edl_image) {
+ release_firmware(firmware);
+ goto fw_load_ready_state;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * If we're doing fbc, populate vector tables while
+ * device transitioning into MHI READY state
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+ firmware->size);
+ if (ret) {
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ }
+
+ release_firmware(firmware);
+
+fw_load_ready_state:
+ /* Transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
+ goto error_ready_state;
+ }
+
+ dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
+ return;
+
+error_ready_state:
+ if (mhi_cntrl->fbc_download) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+error_fw_load:
+ mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+ wake_up_all(&mhi_cntrl->state_event);
+}
+
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+{
+ struct image_info *image_info = mhi_cntrl->fbc_image;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ if (!image_info)
+ return -EIO;
+
+ ret = mhi_fw_load_bhie(mhi_cntrl,
+ /* Vector table is the last entry */
+ &image_info->mhi_buf[image_info->entries - 1]);
+ if (ret) {
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+ mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+
+ return ret;
+}
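The boot flow above loads the SBL image over BHI from a single coherent buffer and, when fbc_download is set, prepares a BHIE vector table so the full image can later be pushed via mhi_download_amss_image(). A hedged sketch of how a controller driver might describe its firmware for this flow; the path and sizes are placeholders rather than values from a real device, and the fields are those of struct mhi_controller referenced above:

#include <linux/mhi.h>
#include <linux/sizes.h>

/* Illustrative firmware description for the BHI/BHIE flow above */
static void example_fw_setup(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->fw_image = "example/sbl.mbn";	/* hypothetical firmware path */
	mhi_cntrl->fbc_download = true;			/* push full image via BHIE after SBL */
	mhi_cntrl->sbl_size = SZ_512K;			/* maximum SBL size loaded over BHI */
	mhi_cntrl->seg_len = SZ_512K;			/* BHIE vector table segment size */
}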
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
new file mode 100644
index 000000000000..cfec7811dfbb
--- /dev/null
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include "internal.h"
+
+static int mhi_debugfs_states_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ /* states */
+ seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_cntrl->wake_set ? "true" : "false");
+
+ /* counters */
+ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
+ mhi_cntrl->M3);
+
+ seq_printf(m, " device wake: %u pending packets: %u\n",
+ atomic_read(&mhi_cntrl->dev_wake),
+ atomic_read(&mhi_cntrl->pending_pkts));
+
+ return 0;
+}
+
+static int mhi_debugfs_events_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings;
+ i++, er_ctxt++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev) {
+ seq_printf(m, "Index: %d is an offload event ring\n",
+ i);
+ continue;
+ }
+
+ seq_printf(m, "Index: %d intmod count: %lu time: %lu",
+ i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >>
+ __ffs(EV_CTX_INTMODC_MASK),
+ (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >>
+ __ffs(EV_CTX_INTMODT_MASK));
+
+ seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase),
+ le64_to_cpu(er_ctxt->rlen));
+
+ seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp),
+ le64_to_cpu(er_ctxt->wp));
+
+ seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
+ &mhi_event->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_chan *mhi_chan;
+ struct mhi_chan_ctxt *chan_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->offload_ch) {
+ seq_printf(m, "%s(%u) is an offload channel\n",
+ mhi_chan->name, mhi_chan->chan);
+ continue;
+ }
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ seq_printf(m,
+ "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
+ mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) &
+ CHAN_CTX_CHSTATE_MASK) >> __ffs(CHAN_CTX_CHSTATE_MASK),
+ (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >>
+ __ffs(CHAN_CTX_BRSTMODE_MASK), (le32_to_cpu(chan_ctxt->chcfg) &
+ CHAN_CTX_POLLCFG_MASK) >> __ffs(CHAN_CTX_POLLCFG_MASK));
+
+ seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype),
+ le32_to_cpu(chan_ctxt->erindex));
+
+ seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
+ le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen),
+ le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp));
+
+ seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
+ ring->rp, ring->wp,
+ &mhi_chan->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_device_info_show(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+
+ seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u",
+ mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer",
+ mhi_dev->dev_wake);
+
+ /* for transfer device types only */
+ if (mhi_dev->dev_type == MHI_DEVICE_XFER)
+ seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)",
+ mhi_dev->ul_chan_id, mhi_dev->dl_chan_id);
+
+ seq_puts((struct seq_file *)data, "\n");
+
+ return 0;
+}
+
+static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ /* Show controller and client(s) info */
+ mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m);
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show);
+
+ return 0;
+}
+
+static int mhi_debugfs_regdump_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ enum mhi_state state;
+ enum mhi_ee_type ee;
+ int i, ret = -EIO;
+ u32 val;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void __iomem *base;
+ } regs[] = {
+ { "MHI_REGLEN", MHIREGLEN, mhi_base},
+ { "MHI_VER", MHIVER, mhi_base},
+ { "MHI_CFG", MHICFG, mhi_base},
+ { "MHI_CTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return ret;
+
+ seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee),
+ mhi_state_str(state));
+
+ for (i = 0; regs[i].name; i++) {
+ if (!regs[i].base)
+ continue;
+ ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset,
+ &val);
+ if (ret)
+ continue;
+
+ seq_printf(m, "%s: 0x%x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ seq_printf(m,
+ "Wake count: %d\n%s\n", mhi_dev->dev_wake,
+ "Usage: echo get/put > device_wake to vote/unvote for M0");
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_device_wake_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ char buf[16];
+ int ret = -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "get", 3)) {
+ ret = mhi_device_get_sync(mhi_dev);
+ } else if (!strncmp(buf, "put", 3)) {
+ mhi_device_put(mhi_dev);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+
+static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms);
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_timeout_ms_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ u32 timeout_ms;
+
+ if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms))
+ return -EINVAL;
+
+ mhi_cntrl->timeout_ms = timeout_ms;
+
+ return count;
+}
+
+static int mhi_debugfs_states_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_states_show, inode->i_private);
+}
+
+static int mhi_debugfs_events_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_events_show, inode->i_private);
+}
+
+static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_channels_show, inode->i_private);
+}
+
+static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_devices_show, inode->i_private);
+}
+
+static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_regdump_show, inode->i_private);
+}
+
+static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private);
+}
+
+static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_states_fops = {
+ .open = mhi_debugfs_states_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_events_fops = {
+ .open = mhi_debugfs_events_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_channels_fops = {
+ .open = mhi_debugfs_channels_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_devices_fops = {
+ .open = mhi_debugfs_devices_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_regdump_fops = {
+ .open = mhi_debugfs_regdump_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_device_wake_fops = {
+ .open = mhi_debugfs_device_wake_open,
+ .write = mhi_debugfs_device_wake_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_timeout_ms_fops = {
+ .open = mhi_debugfs_timeout_ms_open,
+ .write = mhi_debugfs_timeout_ms_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static struct dentry *mhi_debugfs_root;
+
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->debugfs_dentry =
+ debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_debugfs_root);
+
+ debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_states_fops);
+ debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_events_fops);
+ debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_channels_fops);
+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_devices_fops);
+ debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_regdump_fops);
+ debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_device_wake_fops);
+ debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_timeout_ms_fops);
+}
+
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ debugfs_remove_recursive(mhi_cntrl->debugfs_dentry);
+ mhi_cntrl->debugfs_dentry = NULL;
+}
+
+void mhi_debugfs_init(void)
+{
+ mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL);
+}
+
+void mhi_debugfs_exit(void)
+{
+ debugfs_remove_recursive(mhi_debugfs_root);
+}
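The files above are created under the bus directory set up in mhi_debugfs_init(), i.e. /sys/kernel/debug/mhi/<controller device>/. A small userspace sketch that dumps the state summary and votes for M0; the controller name "mhi0" is a placeholder, since the real name comes from dev_name() of the MHI device:

#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/kernel/debug/mhi/mhi0";	/* placeholder controller name */
	char path[256], line[256];
	FILE *f;

	/* Dump the "states" summary */
	snprintf(path, sizeof(path), "%s/states", dir);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	/* Vote for M0; write "put" later to release the vote */
	snprintf(path, sizeof(path), "%s/device_wake", dir);
	f = fopen(path, "w");
	if (f) {
		fputs("get\n", f);
		fclose(f);
	}
	return 0;
}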
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
new file mode 100644
index 000000000000..bf672de35131
--- /dev/null
+++ b/drivers/bus/mhi/host/init.c
@@ -0,0 +1,1452 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+static DEFINE_IDA(mhi_controller_ida);
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
+ [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
+ [MHI_EE_AMSS] = "MISSION MODE",
+ [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
+ [MHI_EE_WFW] = "WLAN FIRMWARE",
+ [MHI_EE_PTHRU] = "PASS THROUGH",
+ [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
+ [MHI_EE_FP] = "FLASH PROGRAMMER",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+ [DEV_ST_TRANSITION_PBL] = "PBL",
+ [DEV_ST_TRANSITION_READY] = "READY",
+ [DEV_ST_TRANSITION_SBL] = "SBL",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+ [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+};
+
+const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
+ [MHI_CH_STATE_TYPE_RESET] = "RESET",
+ [MHI_CH_STATE_TYPE_STOP] = "STOP",
+ [MHI_CH_STATE_TYPE_START] = "START",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_DISABLE] = "DISABLE",
+ [MHI_PM_STATE_POR] = "POWER ON RESET",
+ [MHI_PM_STATE_M0] = "M0",
+ [MHI_PM_STATE_M2] = "M2",
+ [MHI_PM_STATE_M3_ENTER] = "M?->M3",
+ [MHI_PM_STATE_M3] = "M3",
+ [MHI_PM_STATE_M3_EXIT] = "M3->M0",
+ [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+};
+
+const char *to_mhi_pm_state_str(u32 state)
+{
+ int index;
+
+ if (state)
+ index = __fls(state);
+
+ if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return sysfs_emit(buf, "Serial Number: %u\n",
+ mhi_cntrl->serial_number);
+}
+static DEVICE_ATTR_RO(serial_number);
+
+static ssize_t oem_pk_hash_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int i, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
+ i, mhi_cntrl->oem_pk_hash[i]);
+
+ return cnt;
+}
+static DEVICE_ATTR_RO(oem_pk_hash);
+
+static ssize_t soc_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_soc_reset(mhi_cntrl);
+ return count;
+}
+static DEVICE_ATTR_WO(soc_reset);
+
+static struct attribute *mhi_dev_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_oem_pk_hash.attr,
+ &dev_attr_soc_reset.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mhi_dev);
+
+/* MHI protocol requires the transfer ring to be aligned to the ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+ return 0;
+}
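/*
 * Worked example of the alignment above (illustrative only, assuming the
 * ring length is a power of two, e.g. a 4 KiB ring):
 *
 *   len        = 0x1000
 *   alloc_size = len + (len - 1)                = 0x1fff
 *   dma_handle = 0x80000a40                     (example value)
 *   iommu_base = (dma_handle + 0xfff) & ~0xfff  = 0x80001000
 *   base       = pre_aligned + (iommu_base - dma_handle)
 *
 * Over-allocating by len - 1 bytes guarantees that an address aligned to
 * len falls inside the allocation.
 */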
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
+ int i, ret;
+
+ /* if controller driver has set irq_flags, use it */
+ if (mhi_cntrl->irq_flags)
+ irq_flags = mhi_cntrl->irq_flags;
+
+ /* Setup BHI_INTVEC IRQ */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
+ mhi_intvec_threaded_handler,
+ irq_flags,
+ "bhi", mhi_cntrl);
+ if (ret)
+ return ret;
+	/*
+	 * IRQs will be enabled during mhi_async_power_up(), so disable them explicitly here.
+	 * Since IRQF_SHARED is used by default while requesting IRQs, we assume that
+	 * IRQ_NOAUTOEN is not applicable.
+	 */
+ disable_irq(mhi_cntrl->irq[0]);
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+ dev_err(dev, "irq %d not available for event ring\n",
+ mhi_event->irq);
+ ret = -EINVAL;
+ goto error_request;
+ }
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
+ mhi_irq_handler,
+ irq_flags,
+ "mhi", mhi_event);
+ if (ret) {
+ dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->irq], i);
+ goto error_request;
+ }
+
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ u32 tmp;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* Setup channel ctxt */
+ mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* Skip if it is an offload channel */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ tmp &= ~CHAN_CTX_BRSTMODE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
+ tmp &= ~CHAN_CTX_POLLCFG_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
+ chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
+ }
+
+ /* Setup event context */
+ mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ tmp = le32_to_cpu(er_ctxt->intmod);
+ tmp &= ~EV_CTX_INTMODC_MASK;
+ tmp &= ~EV_CTX_INTMODT_MASK;
+ tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
+ er_ctxt->intmod = cpu_to_le32(tmp);
+
+ er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
+ er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_ring_element);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+		/*
+		 * If the read pointer equals the write pointer, the ring
+		 * is empty
+		 */
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = cpu_to_le64(ring->len);
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* Setup cmd context */
+ ret = -ENOMEM;
+ mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) *
+ NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_ring_element);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = cpu_to_le64(ring->len);
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ dma_free_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ u32 offset;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICTRLBASE_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {0, 0}
+ };
+
+ dev_dbg(dev, "Initializing MHI registers\n");
+
+ /* Read channel db offset */
+ ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Setup wake db */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_cntrl->wake_set = false;
+
+ /* Setup channel db address for each channel in tre_ring */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* Read event ring db offset */
+ ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read ERDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Setup event db address for each ev_ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* Setup DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ /* Write to MMIO registers */
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].val);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
+ mhi_cntrl->total_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
+ mhi_cntrl->hw_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ if (!chan_ctxt->rbase) /* Already uninitialized */
+ return;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ tre_ring->ctxt_wp = NULL;
+ chan_ctxt->rbase = 0;
+ chan_ctxt->rlen = 0;
+ chan_ctxt->rp = 0;
+ chan_ctxt->wp = 0;
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_ring_element);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* Update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ const struct mhi_event_config *event_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i, num;
+
+ num = config->num_events;
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Populate event ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < num; i++) {
+ event_cfg = &config->event_cfg[i];
+
+ mhi_event->er_index = i;
+ mhi_event->ring.elements = event_cfg->num_elements;
+ mhi_event->intmod = event_cfg->irq_moderation_ms;
+ mhi_event->irq = event_cfg->irq;
+
+ if (event_cfg->channel != U32_MAX) {
+ /* This event ring has a dedicated channel */
+ mhi_event->chan = event_cfg->channel;
+ if (mhi_event->chan >= mhi_cntrl->max_chan) {
+ dev_err(dev,
+ "Event Ring channel not available\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ /* Priority is fixed to 1 for now */
+ mhi_event->priority = 1;
+
+ mhi_event->db_cfg.brstmode = event_cfg->mode;
+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_event->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_event->data_type = event_cfg->data_type;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ default:
+ dev_err(dev, "Event Ring type not supported\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->hw_ring = event_cfg->hardware_event;
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+
+ mhi_event->cl_manage = event_cfg->client_managed;
+ mhi_event->offload_ev = event_cfg->offload_channel;
+ mhi_event++;
+ }
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+
+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ const struct mhi_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i;
+ u32 chan;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+	/*
+	 * The allocation of MHI channels can exceed 32KB in some scenarios,
+	 * so to avoid any possible memory allocation failures, vzalloc is
+	 * used here.
+	 */
+ mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+ sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+ /* Populate channel configurations */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+
+ mhi_chan->tre_ring.elements = ch_cfg->num_elements;
+ if (!mhi_chan->tre_ring.elements)
+ goto error_chan_cfg;
+
+		/*
+		 * For some channels, the local ring length should be bigger than
+		 * the transfer ring length due to internal logical channels
+		 * in the device, so the host can queue many more buffers than the
+		 * transfer ring length allows. For example, RSC channels should
+		 * have a larger local channel length than the transfer ring length.
+		 */
+ mhi_chan->buf_ring.elements = ch_cfg->local_elements;
+ if (!mhi_chan->buf_ring.elements)
+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+ mhi_chan->er_index = ch_cfg->event_ring;
+ mhi_chan->dir = ch_cfg->dir;
+
+		/*
+		 * For most channels, chtype is identical to the channel direction.
+		 * So, if it is not defined, assign the channel direction to
+		 * chtype.
+		 */
+ mhi_chan->type = ch_cfg->type;
+ if (!mhi_chan->type)
+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+ mhi_chan->ee_mask = ch_cfg->ee_mask;
+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
+ mhi_chan->lpm_notify = ch_cfg->lpm_notify;
+ mhi_chan->offload_ch = ch_cfg->offload_channel;
+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
+ mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->wake_capable = ch_cfg->wake_capable;
+
+		/*
+		 * If the MHI host allocates buffers, then the channel direction
+		 * must be DMA_FROM_DEVICE
+		 */
+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+		/*
+		 * Bi-directional and directionless channels must be
+		 * offload channels
+		 */
+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ if (!mhi_chan->offload_ch) {
+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
+ dev_err(dev, "Invalid doorbell mode\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ int ret;
+
+ /* Parse MHI channel configuration */
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ /* Parse MHI event configuration */
+ ret = parse_ev_cfg(mhi_cntrl, config);
+ if (ret)
+ goto error_ev_cfg;
+
+ mhi_cntrl->timeout_ms = config->timeout_ms;
+ if (!mhi_cntrl->timeout_ms)
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ mhi_cntrl->bounce_buf = config->use_bounce_buf;
+ mhi_cntrl->buffer_len = config->buf_len;
+ if (!mhi_cntrl->buffer_len)
+ mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+ /* By default, host is allowed to ring DB in both M0 and M2 states */
+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+ if (config->m2_no_db)
+ mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+ return 0;
+
+error_ev_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_device *mhi_dev;
+ u32 soc_info;
+ int ret, i;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
+ !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+ !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
+ !mhi_cntrl->irq || !mhi_cntrl->reg_len)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return -EINVAL;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_event;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
+ if (!mhi_cntrl->hiprio_wq) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip for offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->data_type == MHI_ER_CTRL)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+
+ /* Used to set the BEI field of the TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
+ }
+
+ if (mhi_cntrl->bounce_buf) {
+ mhi_cntrl->map_single = mhi_map_single_use_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+ } else {
+ mhi_cntrl->map_single = mhi_map_single_no_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+ }
+
+ /* Read the MHI device info */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ SOC_HW_VERSION_OFFS, &soc_info);
+ if (ret)
+ goto err_destroy_wq;
+
+ mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
+ mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
+ mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
+ mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
+
+ mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
+ if (mhi_cntrl->index < 0) {
+ ret = mhi_cntrl->index;
+ goto err_destroy_wq;
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto err_ida_free;
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto error_setup_irq;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+
+ /* Init wakeup source */
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_release_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ mhi_create_debugfs(mhi_cntrl);
+
+ return 0;
+
+err_release_dev:
+ put_device(&mhi_dev->dev);
+error_setup_irq:
+ mhi_deinit_free_irq(mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_controller_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->hiprio_wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_register_controller);
+
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
+ unsigned int i;
+
+ mhi_deinit_free_irq(mhi_cntrl);
+ mhi_destroy_debugfs(mhi_cntrl);
+
+ destroy_workqueue(mhi_cntrl->hiprio_wq);
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_event);
+
+ /* Drop the references to MHI devices created for channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ put_device(&mhi_chan->mhi_dev->dev);
+ }
+ vfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_controller_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+
+struct mhi_controller *mhi_alloc_controller(void)
+{
+ struct mhi_controller *mhi_cntrl;
+
+ mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
+
+ return mhi_cntrl;
+}
+EXPORT_SYMBOL_GPL(mhi_alloc_controller);
+
+void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+ kfree(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_free_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhi_off, bhie_off;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHI offset\n");
+ goto error_reg_offset;
+ }
+
+ if (bhi_off >= mhi_cntrl->reg_len) {
+ dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
+ bhi_off, mhi_cntrl->reg_len);
+ ret = -EINVAL;
+ goto error_reg_offset;
+ }
+ mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
+
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto error_reg_offset;
+ }
+
+ if (bhie_off >= mhi_cntrl->reg_len) {
+ dev_err(dev,
+ "BHIe offset: 0x%x is out of range: 0x%zx\n",
+ bhie_off, mhi_cntrl->reg_len);
+ ret = -EINVAL;
+ goto error_reg_offset;
+ }
+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+ }
+
+ if (mhi_cntrl->rddm_size) {
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+ /*
+ * Allocate RDDM table for debugging purposes if specified
+ */
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+ if (mhi_cntrl->rddm_image) {
+ ret = mhi_rddm_prepare(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ if (ret) {
+ mhi_free_bhie_table(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ goto error_reg_offset;
+ }
+ }
+ }
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return 0;
+
+error_reg_offset:
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+ mhi_cntrl->bhi = NULL;
+ mhi_cntrl->bhie = NULL;
+
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
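For context, the two helpers above bracket the power-up/power-down state machine implemented in pm.c. A minimal sketch of the intended calling order from a controller driver follows; the mhi_sync_power_up() and mhi_power_down() APIs from the public header are assumed here, and the demo_* name is purely illustrative:

#include <linux/mhi.h>

/* Sketch only: power-cycle an already-registered controller */
static int demo_power_cycle(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Allocate device contexts and map the BHI/BHIe register regions */
	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	/* Run the power-up state machine until mission mode (assumed API) */
	ret = mhi_sync_power_up(mhi_cntrl);
	if (ret) {
		mhi_unprepare_after_power_down(mhi_cntrl);
		return ret;
	}

	/* ... MHI client devices are created by the core at this point ... */

	/* Tear down in the reverse order; 'true' requests a graceful shutdown */
	mhi_power_down(mhi_cntrl, true);
	mhi_unprepare_after_power_down(mhi_cntrl);

	return 0;
}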
+
+static void mhi_release_device(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created if the mhi_dev
+ * associated with them is NULL. This scenario will happen during the
+ * controller suspend and resume.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_bus_type;
+ dev->release = mhi_release_device;
+
+ if (mhi_cntrl->mhi_dev) {
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+ } else {
+ /* for MHI controller device, parent is the bus device (e.g. pci device) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ }
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_wake = 0;
+
+ return mhi_dev;
+}
+
+static int mhi_driver_probe(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device_driver *drv = dev->driver;
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ struct mhi_event *mhi_event;
+ struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+ int ret;
+
+ /* Bring device out of LPM */
+ ret = mhi_device_get_sync(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ if (ul_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, xfer_cb should be provided */
+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+ goto exit_probe;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ }
+
+ ret = -EINVAL;
+ if (dl_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, xfer_cb should be provided */
+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+ goto exit_probe;
+
+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+ /*
+ * If the channel event ring is managed by client, then
+ * status_cb must be provided so that the framework can
+ * notify pending data
+ */
+ if (mhi_event->cl_manage && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+ }
+
+ /* Call the user provided probe function */
+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+
+exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ enum mhi_ch_state ch_state[] = {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_DISABLED
+ };
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ /* Wake all threads waiting for completion */
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_EV_CC_INVALID;
+ complete_all(&mhi_chan->completion);
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Set the channel state to disabled */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Reset the non-offload channel */
+ if (!mhi_chan->offload_ch)
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ mhi_drv->remove(mhi_dev);
+
+ /* De-init channel if it was enabled */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
+ ch_state[dir] == MHI_CH_STATE_STOP) &&
+ !mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ while (mhi_dev->dev_wake)
+ mhi_device_put(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_driver_probe;
+ driver->remove = mhi_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_driver_register);
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_driver_unregister);
+
+static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_bus_type = {
+ .name = "mhi",
+ .dev_name = "mhi",
+ .match = mhi_match,
+ .uevent = mhi_uevent,
+ .dev_groups = mhi_dev_groups,
+};
+
+static int __init mhi_init(void)
+{
+ mhi_debugfs_init();
+ return bus_register(&mhi_bus_type);
+}
+
+static void __exit mhi_exit(void)
+{
+ mhi_debugfs_exit();
+ bus_unregister(&mhi_bus_type);
+}
+
+postcore_initcall(mhi_init);
+module_exit(mhi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Host Interface");
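For reference, a client driver binds to the channel names matched by mhi_match() above through an id_table and the probe/remove/transfer callbacks that mhi_driver_probe() validates. A minimal sketch, assuming the public <linux/mhi.h> API (mhi_prepare_for_transfer(), module_mhi_driver()); the "TEST" channel name and the demo_* symbols are purely illustrative:

#include <linux/mhi.h>
#include <linux/module.h>

/* Hypothetical channel name, used only for illustration */
static const struct mhi_device_id demo_id_table[] = {
	{ .chan = "TEST" },
	{}
};
MODULE_DEVICE_TABLE(mhi, demo_id_table);

static int demo_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	/* Prepare both channels for transfer before queueing buffers */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void demo_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void demo_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *res)
{
	/* Uplink buffer completion: res->buf_addr can be freed/reused here */
}

static void demo_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *res)
{
	/* Downlink data arrives here: res->buf_addr, res->bytes_xferd */
}

static struct mhi_driver demo_driver = {
	.id_table = demo_id_table,
	.probe = demo_probe,
	.remove = demo_remove,
	.ul_xfer_cb = demo_ul_xfer_cb,
	.dl_xfer_cb = demo_dl_xfer_cb,
	.driver = {
		.name = "mhi_demo",
	},
};
module_mhi_driver(demo_driver);

MODULE_LICENSE("GPL");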
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
new file mode 100644
index 000000000000..01fd10a399b6
--- /dev/null
+++ b/drivers/bus/mhi/host/internal.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include "../common.h"
+
+extern struct bus_type mhi_bus_type;
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET 0xb0
+#define MHI_SOC_RESET_REQ BIT(0)
+
+#define SOC_HW_VERSION_OFFS 0x224
+#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_ch_state_type {
+ MHI_CH_STATE_TYPE_RESET,
+ MHI_CH_STATE_TYPE_STOP,
+ MHI_CH_STATE_TYPE_START,
+ MHI_CH_STATE_TYPE_MAX,
+};
+
+extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
+#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
+ "INVALID_STATE" : \
+ mhi_ch_state_type_str[(state)])
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
+ mode != MHI_DB_BRST_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
+#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
+ ee == MHI_EE_FP)
+
+enum dev_st_transition {
+ DEV_ST_TRANSITION_PBL,
+ DEV_ST_TRANSITION_READY,
+ DEV_ST_TRANSITION_SBL,
+ DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_FP,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
+ DEV_ST_TRANSITION_MAX,
+};
+
+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : dev_state_tran_str[state])
+
+/* internal power states */
+enum mhi_pm_state {
+ MHI_PM_STATE_DISABLE,
+ MHI_PM_STATE_POR,
+ MHI_PM_STATE_M0,
+ MHI_PM_STATE_M2,
+ MHI_PM_STATE_M3_ENTER,
+ MHI_PM_STATE_M3,
+ MHI_PM_STATE_M3_EXIT,
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+};
+
+#define MHI_PM_DISABLE BIT(0)
+#define MHI_PM_POR BIT(1)
+#define MHI_PM_M0 BIT(2)
+#define MHI_PM_M2 BIT(3)
+#define MHI_PM_M3_ENTER BIT(4)
+#define MHI_PM_M3 BIT(5)
+#define MHI_PM_M3_EXIT BIT(6)
+/* firmware download failure state */
+#define MHI_PM_FW_DL_ERR BIT(7)
+#define MHI_PM_SYS_ERR_DETECT BIT(8)
+#define MHI_PM_SYS_ERR_PROCESS BIT(9)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+/* link not accessible */
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+#define NR_OF_CMD_RINGS 1
+#define CMD_EL_PER_RING 128
+#define PRIMARY_CMD_RING 0
+#define MHI_DEV_WAKE_DB 127
+#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
+
+enum mhi_er_type {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum mhi_db_brst_mode brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum mhi_pm_state from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum dev_st_transition state;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ __le64 *ctxt_wp; /* point to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ void *cb_buf;
+ dma_addr_t p_addr;
+ size_t len;
+ enum dma_data_direction dir;
+ bool used; /* Indicates whether the buffer is used or not */
+ bool pre_mapped; /* Already pre-mapped by client */
+};
+
+struct mhi_event {
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ u32 er_index;
+ u32 intmod;
+ u32 irq;
+ int chan; /* this event ring is dedicated to a channel (optional) */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+};
+
+struct mhi_chan {
+ const char *name;
+ /*
+ * Important: When consuming, increment tre_ring first and when
+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
+ * is guaranteed to have space so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 chan;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ enum mhi_ch_ee_mask ee_mask;
+ enum mhi_ch_state ch_state;
+ enum mhi_ev_ccs ccs;
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool wake_capable;
+};
+
+/* Default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+
+/* debugfs related functions */
+#ifdef CONFIG_MHI_BUS_DEBUG
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_debugfs_init(void);
+void mhi_debugfs_exit(void);
+#else
+static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_debugfs_init(void)
+{
+}
+
+static inline void mhi_debugfs_exit(void)
+{
+}
+#endif
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+/* Power management APIs */
+enum mhi_pm_state __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state);
+const char *to_mhi_pm_state_str(u32 state);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd);
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
+static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
+{
+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
+{
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+}
+
+/* Register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t db_val);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t db_val);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 *out);
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val, u32 delayus);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Initialization methods */
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan, unsigned int flags);
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Event processing methods */
+void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+
+/* ISR handlers */
+irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags);
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+#endif /* _MHI_INT_H */
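To show how these internals are reached, here is a minimal sketch of a controller driver filling in the fields checked at the top of mhi_register_controller() (cntrl_dev, regs, reg_len, irq/nr_irqs, read_reg/write_reg, status_cb, runtime_get/runtime_put). The demo_* helpers and the plain MMIO accessors are assumptions for illustration, not part of this patch:

#include <linux/io.h>
#include <linux/mhi.h>

static int demo_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}

static void demo_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val)
{
	writel(val, addr);
}

static void demo_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { }
static int demo_runtime_get(struct mhi_controller *mhi_cntrl) { return 0; }
static void demo_runtime_put(struct mhi_controller *mhi_cntrl) { }

static int demo_register(struct device *parent, void __iomem *regs, size_t reg_len,
			 int *irqs, u32 nr_irqs,
			 const struct mhi_controller_config *cfg)
{
	struct mhi_controller *mhi_cntrl = mhi_alloc_controller();
	int ret;

	if (!mhi_cntrl)
		return -ENOMEM;

	/* These are the fields validated at the top of mhi_register_controller() */
	mhi_cntrl->cntrl_dev = parent;
	mhi_cntrl->regs = regs;
	mhi_cntrl->reg_len = reg_len;
	mhi_cntrl->irq = irqs;
	mhi_cntrl->nr_irqs = nr_irqs;
	mhi_cntrl->read_reg = demo_read_reg;
	mhi_cntrl->write_reg = demo_write_reg;
	mhi_cntrl->status_cb = demo_status_cb;
	mhi_cntrl->runtime_get = demo_runtime_get;
	mhi_cntrl->runtime_put = demo_runtime_put;

	ret = mhi_register_controller(mhi_cntrl, cfg);
	if (ret)
		mhi_free_controller(mhi_cntrl);

	return ret;
}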
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
new file mode 100644
index 000000000000..df0fbfee7b78
--- /dev/null
+++ b/drivers/bus/mhi/host/main.c
@@ -0,0 +1,1696 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out)
+{
+ return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> __ffs(mask);
+
+ return 0;
+}
+
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 val, u32 delayus)
+{
+ int ret;
+ u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
+ if (ret)
+ return ret;
+
+ if (out == val)
+ return 0;
+
+ fsleep(delayus);
+ }
+
+ return -ETIMEDOUT;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val)
+{
+ mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
+}
+
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= (val << __ffs(mask));
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+
+ return 0;
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = cpu_to_le64(db);
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+
+ /*
+ * Writes to the new ring element must be visible to the hardware
+ * before letting the hardware know there is a new element to fetch.
+ */
+ dma_wmb();
+ *ring->ctxt_wp = cpu_to_le64(db);
+
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+ ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : exec;
+}
+EXPORT_SYMBOL_GPL(mhi_get_exec_env);
+
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
+
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->reset) {
+ mhi_cntrl->reset(mhi_cntrl);
+ return;
+ }
+
+ /* Generic MHI SoC reset */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+}
+EXPORT_SYMBOL_GPL(mhi_soc_reset);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ buf_info->v_addr, buf_info->len,
+ buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
+ buf_info->bb_addr, buf_info->p_addr);
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp) {
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return nr_el;
+}
+
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_chan *ul_chan, *dl_chan;
+ struct mhi_device *mhi_dev;
+ struct mhi_controller *mhi_cntrl;
+ enum mhi_ee_type ee = MHI_EE_MAX;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy virtual devices that are attached to the bus */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ /*
+ * If an execution environment is specified, remove only those devices
+ * that started in it, based on the ee_mask of the channels, as we move
+ * on to a different execution environment
+ */
+ if (data)
+ ee = *(enum mhi_ee_type *)data;
+
+ /*
+ * For the suspend and resume case, this function will get called
+ * without mhi_unregister_controller(). Hence, we need to drop the
+ * references to mhi_dev created for ul and dl channels. We can
+ * be sure that there will be no instances of mhi_dev left after
+ * this.
+ */
+ if (ul_chan) {
+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&ul_chan->mhi_dev->dev);
+ }
+
+ if (dl_chan) {
+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&dl_chan->mhi_dev->dev);
+ }
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+ struct mhi_driver *mhi_drv;
+
+ if (!mhi_dev->dev.driver)
+ return;
+
+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+ if (mhi_drv->status_cb)
+ mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+EXPORT_SYMBOL_GPL(mhi_notify);
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *mhi_chan;
+ struct mhi_device *mhi_dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+ continue;
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return;
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+ switch (mhi_chan->dir) {
+ case DMA_TO_DEVICE:
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ break;
+ case DMA_FROM_DEVICE:
+ /* We use dl_chan as offload channels */
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ break;
+ default:
+ dev_err(dev, "Direction not supported\n");
+ put_device(&mhi_dev->dev);
+ return;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Check next channel if it matches */
+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+ i++;
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+ }
+ }
+
+ /* Channel name is same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ /* Init wakeup source if available */
+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+ }
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ dma_addr_t ptr;
+ void *dev_rp;
+
+ /*
+ * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
+ * and by that time mhi_ctxt would have been freed. So check for the existence of mhi_ctxt
+ * before handling the IRQs.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+ /* Only proceed if event ring has pending events */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+ /* For client managed event ring, notify pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+ } else {
+ tasklet_schedule(&mhi_event->task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ enum mhi_ee_type ee;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
+ goto exit_intvec;
+
+ switch (ee) {
+ case MHI_EE_RDDM:
+ /* proceed if power down is not already in progress */
+ if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ mhi_cntrl->ee = ee;
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ break;
+ case MHI_EE_PBL:
+ case MHI_EE_EDL:
+ case MHI_EE_PTHRU:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ mhi_cntrl->ee = ee;
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ default:
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ }
+
+exit_intvec:
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+
+ /* Wake up events waiting for state change */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ /* Update the WP */
+ ring->wp += ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result;
+ unsigned long flags = 0;
+ u32 ev_code;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /*
+ * If it's a DB event, we need to grab the lock with preemption
+ * disabled and as a writer, because we have to update the DB
+ * register and another thread could be doing the same.
+ */
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_lock_irqsave(&mhi_chan->lock, flags);
+ else
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_tx_event;
+
+ switch (ev_code) {
+ case MHI_EV_CC_OVERFLOW:
+ case MHI_EV_CC_EOB:
+ case MHI_EV_CC_EOT:
+ {
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ struct mhi_ring_element *local_rp, *ev_tre;
+ void *dev_rp;
+ struct mhi_buf_info *buf_info;
+ u16 xfer_len;
+
+ if (!is_valid_ring_ptr(tre_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the tre ring\n");
+ break;
+ }
+ /* Get the TRB this event points to */
+ ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+ dev_rp = ev_tre + 1;
+ if (dev_rp >= (tre_ring->base + tre_ring->len))
+ dev_rp = tre_ring->base;
+
+ result.dir = mhi_chan->dir;
+
+ local_rp = tre_ring->rp;
+ while (local_rp != dev_rp) {
+ buf_info = buf_ring->rp;
+ /* If it's the last TRE, get length from the event */
+ if (local_rp == ev_tre)
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+ else
+ xfer_len = buf_info->len;
+
+ /* Unmap if it's not pre-mapped by client */
+ if (likely(!buf_info->pre_mapped))
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ result.buf_addr = buf_info->cb_buf;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ atomic_dec(&mhi_cntrl->pending_pkts);
+ /* Release the reference got from mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ /*
+ * Recycle the buffer if it is pre-allocated. If there is an
+ * error, there is not much we can do apart from dropping the
+ * packet.
+ */
+ if (mhi_chan->pre_alloc) {
+ if (mhi_queue_buf(mhi_chan->mhi_dev,
+ mhi_chan->dir,
+ buf_info->cb_buf,
+ buf_info->len, MHI_EOT)) {
+ dev_err(dev,
+ "Error recycling buffer for chan:%d\n",
+ mhi_chan->chan);
+ kfree(buf_info->cb_buf);
+ }
+ }
+ }
+ break;
+ } /* CC_EOT */
+ case MHI_EV_CC_OOB:
+ case MHI_EV_CC_DB_MODE:
+ {
+ unsigned long pm_lock_flags;
+
+ mhi_chan->db_cfg.db_mode = 1;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
+ if (tre_ring->wp != tre_ring->rp &&
+ MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
+ break;
+ }
+ case MHI_EV_CC_BAD_TRE:
+ default:
+ dev_err(dev, "Unknown event 0x%x\n", ev_code);
+ break;
+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_unlock_irqrestore(&mhi_chan->lock, flags);
+ else
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_result result;
+ int ev_code;
+ u32 cookie; /* offset to local descriptor */
+ u16 xfer_len;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ cookie = MHI_TRE_GET_EV_COOKIE(event);
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+ /* Received an out-of-bounds cookie */
+ WARN_ON(cookie >= buf_ring->len);
+
+ buf_info = buf_ring->base + cookie;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_rsc_event;
+
+ WARN_ON(!buf_info->used);
+
+ /* notify the client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /*
+ * Note: We're arbitrarily incrementing RP even though the completion
+ * packet we processed might not be the same one. We can do this
+ * because the device is guaranteed to cache descriptors in the order
+ * it receives them, so even though the completion event is for a
+ * different descriptor we can reuse all descriptors in between.
+ * Example:
+ * The transfer ring has descriptors A, B, C and D.
+ * The last descriptor the host queued is D (WP) and the first
+ * is A (RP).
+ * The completion event we just serviced is for descriptor C.
+ * Then we can safely queue descriptors to replace A, B and C
+ * even though the host did not receive completions for them.
+ */
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ buf_info->used = false;
+
+end_process_rsc_event:
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *tre)
+{
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *mhi_ring = &cmd_ring->ring;
+ struct mhi_ring_element *cmd_pkt;
+ struct mhi_chan *mhi_chan;
+ u32 chan;
+
+ if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the cmd ring\n");
+ return;
+ }
+
+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Completion packet for invalid channel ID: %d\n", chan);
+ }
+
+ mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chan;
+ int count = 0;
+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+
+ /*
+ * This is a quick check to avoid unnecessary event processing
+ * in case MHI is already in an error state, but it's still possible
+ * to transition to an error state while processing events
+ */
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ switch (type) {
+ case MHI_PKT_TYPE_BW_REQ_EVENT:
+ {
+ struct mhi_link_info *link_info;
+
+ link_info = &mhi_cntrl->mhi_link_info;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ link_info->target_link_speed =
+ MHI_TRE_GET_EV_LINKSPEED(local_rp);
+ link_info->target_link_width =
+ MHI_TRE_GET_EV_LINKWIDTH(local_rp);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Received BW_REQ event\n");
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
+ break;
+ }
+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+ {
+ enum mhi_state new_state;
+
+ new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+ dev_dbg(dev, "State change event to state: %s\n",
+ mhi_state_str(new_state));
+
+ switch (new_state) {
+ case MHI_STATE_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M1:
+ mhi_pm_m1_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M3:
+ mhi_pm_m3_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_SYS_ERR:
+ {
+ enum mhi_pm_state pm_state;
+
+ dev_dbg(dev, "System error detected\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ }
+ default:
+ dev_err(dev, "Invalid state: %s\n",
+ mhi_state_str(new_state));
+ }
+
+ break;
+ }
+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+ mhi_process_cmd_completion(mhi_cntrl, local_rp);
+ break;
+ case MHI_PKT_TYPE_EE_EVENT:
+ {
+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+ dev_dbg(dev, "Received EE event: %s\n",
+ TO_MHI_EXEC_STR(event));
+ switch (event) {
+ case MHI_EE_SBL:
+ st = DEV_ST_TRANSITION_SBL;
+ break;
+ case MHI_EE_WFW:
+ case MHI_EE_AMSS:
+ st = DEV_ST_TRANSITION_MISSION_MODE;
+ break;
+ case MHI_EE_FP:
+ st = DEV_ST_TRANSITION_FP;
+ break;
+ case MHI_EE_RDDM:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = event;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ default:
+ dev_err(dev,
+ "Unhandled EE event: 0x%x\n", type);
+ }
+ if (st != DEV_ST_TRANSITION_MAX)
+ mhi_queue_state_transition(mhi_cntrl, st);
+
+ break;
+ }
+ case MHI_PKT_TYPE_TX_EVENT:
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ if (!mhi_chan->configured)
+ break;
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+ break;
+ default:
+ dev_err(dev, "Unhandled event type: %d\n", type);
+ break;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan;
+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ int ret;
+
+ /*
+ * We can check PM state w/o a lock here because there is no way
+ * PM state can change from reg access valid to no access while this
+ * thread is executing.
+ */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ /*
+ * We may have a pending event, but we are not allowed to
+ * process it since we are probably in a suspended state,
+ * so trigger a resume.
+ */
+ mhi_trigger_resume(mhi_cntrl);
+
+ return;
+ }
+
+ /* Process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+ * We received an IRQ but no events to process, maybe device went to
+ * SYS_ERR state? Check the state to confirm.
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ }
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ enum dma_data_direction dir, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+ if (unlikely(ret)) {
+ ret = -EAGAIN;
+ goto exit_unlock;
+ }
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+ goto exit_unlock;
+
+	/* The packet is queued; take a usage ref to exit M3 if necessary.
+	 * For a host->device buffer the balancing put is done on buffer
+	 * completion; for a device->host buffer it is done after ringing the DB.
+	 */
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+	/* Assert dev_wake (to exit/prevent M1/M2) */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
+
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
+
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
+
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_dma);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_ring_element *mhi_tre;
+ struct mhi_buf_info *buf_info;
+ int eot, eob, chain, bei;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ buf_info = buf_ring->wp;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = info->len;
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
+
+ eob = !!(flags & MHI_EOB);
+ eot = !!(flags & MHI_EOT);
+ chain = !!(flags & MHI_CHAIN);
+ bei = !!(mhi_chan->intmod);
+
+ mhi_tre = tre_ring->wp;
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ return 0;
+}
+
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_buf);
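+
+/*
+ * Illustrative usage only, not part of this driver: a minimal sketch of how
+ * a hypothetical client driver might queue a receive buffer on its download
+ * channel, assuming "buf" points to "len" bytes owned by the client:
+ *
+ *	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, len, MHI_EOT);
+ *	if (ret == -EAGAIN)
+ *		... the transfer ring is full, retry after a completion ...
+ *
+ * The buffer is handed back through the client's xfer_cb() once the transfer
+ * completes, or with -ENOTCONN if the channel is reset first.
+ */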
+
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd)
+{
+ struct mhi_ring_element *cmd_tre = NULL;
+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *ring = &mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int chan = 0;
+
+ if (mhi_chan)
+ chan = mhi_chan->chan;
+
+ spin_lock_bh(&mhi_cmd->lock);
+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+ spin_unlock_bh(&mhi_cmd->lock);
+ return -ENOMEM;
+ }
+
+ /* prepare the cmd tre */
+ cmd_tre = ring->wp;
+ switch (cmd) {
+ case MHI_CMD_RESET_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+ break;
+ case MHI_CMD_STOP_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+ break;
+ case MHI_CMD_START_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+ break;
+ default:
+ dev_err(dev, "Command not supported\n");
+ break;
+ }
+
+ /* queue to hardware */
+ mhi_add_ring_element(mhi_cntrl, ring);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_cmd->lock);
+
+ return 0;
+}
+
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_ch_state_type to_state)
+{
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ enum mhi_cmd_type cmd = MHI_CMD_NOP;
+ int ret;
+
+ dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
+ TO_CH_STATE_TYPE_STR(to_state));
+
+ switch (to_state) {
+ case MHI_CH_STATE_TYPE_RESET:
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+ write_unlock_irq(&mhi_chan->lock);
+ return -EINVAL;
+ }
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ cmd = MHI_CMD_RESET_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_STOP:
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_STOP_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_START:
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_START_CHAN;
+ break;
+ default:
+ dev_err(dev, "%d: Channel state update to %s not allowed\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ return -EINVAL;
+ }
+
+ /* bring host and device out of suspended states */
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+ if (ret)
+ return ret;
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+ reinit_completion(&mhi_chan->completion);
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+ if (ret) {
+ dev_err(dev, "%d: Failed to send %s channel command\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ goto exit_channel_update;
+ }
+
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ dev_err(dev,
+ "%d: Failed to receive %s channel command completion\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ ret = -EIO;
+ goto exit_channel_update;
+ }
+
+ ret = 0;
+
+ if (to_state != MHI_CH_STATE_TYPE_RESET) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+ MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ dev_dbg(dev, "%d: Channel state change to %s successful\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ mhi_device_put(mhi_cntrl->mhi_dev);
+
+ return ret;
+}
+
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+ goto exit_unprepare_channel;
+ }
+
+	/* No more event processing for this channel */
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_RESET);
+ if (ret)
+ dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+ mhi_chan->chan);
+
+exit_unprepare_channel:
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ if (!mhi_chan->offload_ch) {
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ }
+ dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan, unsigned int flags)
+{
+ int ret = 0;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&mhi_chan->mutex);
+
+	/* Check if the client manages the channel context for offload channels */
+ if (!mhi_chan->offload_ch) {
+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_init_chan;
+ }
+
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_START);
+ if (ret)
+ goto error_pm_state;
+
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
+ /* Pre-allocate buffer for xfer ring */
+ if (mhi_chan->pre_alloc) {
+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+ &mhi_chan->tre_ring);
+ size_t len = mhi_cntrl->buffer_len;
+
+ while (nr_el--) {
+ void *buf;
+ struct mhi_buf_info info = { };
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_pre_alloc;
+ }
+
+ /* Prepare transfer descriptors */
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ goto error_pre_alloc;
+ }
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ read_lock_irq(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ mutex_unlock(&mhi_chan->mutex);
+
+ return 0;
+
+error_pm_state:
+ if (!mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+ mutex_unlock(&mhi_chan->mutex);
+
+ return ret;
+
+error_pre_alloc:
+ mutex_unlock(&mhi_chan->mutex);
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+ return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ struct mhi_event_ctxt *er_ctxt,
+ int chan)
+
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long flags;
+ dma_addr_t ptr;
+
+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+
+ ev_ring = &mhi_event->ring;
+
+	/* Mark all pending events related to this channel as STALE events */
+ spin_lock_irqsave(&mhi_event->lock, flags);
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ dev_rp = ev_ring->rp;
+ } else {
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ }
+
+ local_rp = ev_ring->rp;
+ while (dev_rp != local_rp) {
+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
+ chan == MHI_TRE_GET_EV_CHID(local_rp))
+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+ MHI_PKT_TYPE_STALE_EVENT);
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+
+ dev_dbg(dev, "Finished marking events as stale events\n");
+ spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_result result;
+
+ /* Reset any pending buffers */
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ while (tre_ring->rp != tre_ring->wp) {
+ struct mhi_buf_info *buf_info = buf_ring->rp;
+
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ atomic_dec(&mhi_cntrl->pending_pkts);
+			/* Release the reference taken in mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ if (!buf_info->pre_mapped)
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+ if (mhi_chan->pre_alloc) {
+ kfree(buf_info->cb_buf);
+ } else {
+ result.buf_addr = buf_info->cb_buf;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+ }
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int chan = mhi_chan->chan;
+
+ /* Nothing to reset, client doesn't queue buffers */
+ if (mhi_chan->offload_ch)
+ return;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+ mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
+{
+ int ret, dir;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
+ if (ret)
+ goto error_open_chan;
+ }
+
+ return 0;
+
+error_open_chan:
+ for (--dir; dir >= 0; dir--) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+
+ return ret;
+}
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ int dir;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+ if (!mhi_chan)
+ continue;
+
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
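+
+/*
+ * Illustrative usage only, not part of this driver: the typical channel
+ * lifecycle as seen from a hypothetical client driver:
+ *
+ *	probe:   mhi_prepare_for_transfer(mhi_dev);	start UL/DL channels
+ *	runtime: mhi_queue_buf()/mhi_queue_skb()/...	queue transfers
+ *	remove:  mhi_unprepare_from_transfer(mhi_dev);	reset channels
+ *
+ * Clients that want the core to pre-allocate and queue inbound buffers on
+ * the download channel can call mhi_prepare_for_transfer_autoqueue() instead
+ * of mhi_prepare_for_transfer().
+ */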
+
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ int ret;
+
+ spin_lock_bh(&mhi_event->lock);
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+ spin_unlock_bh(&mhi_event->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_poll);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
new file mode 100644
index 000000000000..caa4ce28cf9e
--- /dev/null
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -0,0 +1,1226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI PCI driver - MHI over PCI controller driver
+ *
+ * This module is a generic driver for registering MHI-over-PCI devices,
+ * such as PCIe QCOM modems.
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/aer.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#define MHI_PCI_DEFAULT_BAR_NUM 0
+
+#define MHI_POST_RESET_DELAY_MS 2000
+
+#define HEALTH_CHECK_PERIOD (HZ * 2)
+
+/**
+ * struct mhi_pci_dev_info - MHI PCI device specific information
+ * @config: MHI controller configuration
+ * @name: name of the PCI module
+ * @fw: firmware path (if any)
+ * @edl: emergency download mode firmware path (if any)
+ * @bar_num: PCI base address register to use for MHI MMIO register space
+ * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @mru_default: default MRU size for MBIM network packets
+ * @sideband_wake: true for devices using a dedicated sideband GPIO for
+ *		   wakeup instead of inband wake support (such as sdx24)
+ */
+struct mhi_pci_dev_info {
+ const struct mhi_controller_config *config;
+ const char *name;
+ const char *fw;
+ const char *edl;
+ unsigned int bar_num;
+ unsigned int dma_data_width;
+ unsigned int mru_default;
+ bool sideband_wake;
+};
+
+#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ .auto_queue = true, \
+ }
+
+#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 0, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_CTRL, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ }
+
+#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 5, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 1, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = true, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ .channel = ch_num, \
+ }
+
+static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+};
+
+static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* DIAG dedicated event ring */
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+ .ch_cfg = modem_qcom_v1_mhi_channels,
+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+ .event_cfg = modem_qcom_v1_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
+ .name = "qcom-sdx65m",
+ .fw = "qcom/sdx65m/xbl.elf",
+ .edl = "qcom/sdx65m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
+ .name = "qcom-sdx55m",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
+ .name = "qcom-sdx24",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = true,
+};
+
+static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ /* The EDL firmware is a flash-programmer exposing firehose protocol */
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_quectel_em1xx_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_quectel_em1xx_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
+ .ch_cfg = mhi_quectel_em1xx_channels,
+ .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
+ .event_cfg = mhi_quectel_em1xx_events,
+};
+
+static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
+ .name = "quectel-em1xx",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_foxconn_sdx55_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+ .name = "foxconn-sdx65",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_mv3x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
+ /* MBIM Control Channel */
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
+ /* MBIM Data Channel */
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
+};
+
+static struct mhi_event_config mhi_mv3x_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 256),
+ MHI_EVENT_CONFIG_DATA(1, 256),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
+};
+
+static const struct mhi_controller_config modem_mv3x_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
+ .ch_cfg = mhi_mv3x_channels,
+ .num_events = ARRAY_SIZE(mhi_mv3x_events),
+ .event_cfg = mhi_mv3x_events,
+};
+
+static const struct mhi_pci_dev_info mhi_mv31_info = {
+ .name = "cinterion-mv31",
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_mv32_info = {
+ .name = "cinterion-mv32",
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
+};
+
+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
+ /* first ring is control+data and DIAG ring */
+ MHI_EVENT_CONFIG_CTRL(0, 2048),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_sierra_em919x_config = {
+ .max_channels = 128,
+ .timeout_ms = 24000,
+ .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
+ .ch_cfg = mhi_sierra_em919x_channels,
+ .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
+ .event_cfg = modem_sierra_em919x_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
+ .name = "sierra-em919x",
+ .config = &modem_sierra_em919x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+};
+
+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
+ .ch_cfg = mhi_telit_fn980_hw_v1_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
+ .event_cfg = mhi_telit_fn980_hw_v1_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
+ .name = "telit-fn980-hwv1",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_telit_fn980_hw_v1_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_telit_fn990_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_telit_fn990_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
+ .ch_cfg = mhi_telit_fn990_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+ .name = "telit-fn990",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+/* Keep the list sorted by PID. Entries with a new VID should be added at the end */
+static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* Telit FN980 hardware revision v1 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+ /* Telit FN990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ /* T99W175 (sdx55), Both for eSIM and Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* DW5930e (sdx55), With eSIM, It's also T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55), Based on Qualcomm new baseline */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W368 (sdx65) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W373 (sdx62) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* MV31-W (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00b3),
+ .driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV31-W (Cinterion), based on new baseline */
+ { PCI_DEVICE(0x1269, 0x00b4),
+ .driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV32-WA (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00ba),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ /* MV32-WB (Cinterion) */
+ { PCI_DEVICE(0x1269, 0x00bb),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+
+enum mhi_pci_device_status {
+ MHI_PCI_DEV_STARTED,
+ MHI_PCI_DEV_SUSPENDED,
+};
+
+struct mhi_pci_device {
+ struct mhi_controller mhi_cntrl;
+ struct pci_saved_state *pci_state;
+ struct work_struct recovery_work;
+ struct timer_list health_check_timer;
+ unsigned long status;
+};
+
+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 *out)
+{
+ *out = readl(addr);
+ return 0;
+}
+
+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 val)
+{
+ writel(val, addr);
+}
+
+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
+ /* Nothing to do for now */
+	/* Forbid or allow runtime PM depending on the reported device state */
+ case MHI_CB_FATAL_ERROR:
+ case MHI_CB_SYS_ERROR:
+ dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+ pm_runtime_forbid(&pdev->dev);
+ break;
+ case MHI_CB_EE_MISSION_MODE:
+ pm_runtime_allow(&pdev->dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
+{
+ /* no-op */
+}
+
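+/*
+ * A config space read of the vendor ID returning all ones or zero means the
+ * device has effectively dropped off the bus (for instance after losing
+ * power), so treat that as "not alive".
+ */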
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ u16 vendor = 0;
+
+ if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ return false;
+
+ if (vendor == (u16) ~0 || vendor == 0)
+ return false;
+
+ return true;
+}
+
+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
+ unsigned int bar_num, u64 dma_mask)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ err = pci_assign_resource(pdev, bar_num);
+ if (err)
+ return err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
+ return err;
+ }
+ mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
+ mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *mhi_cntrl_config)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int nr_vectors, i;
+ int *irq;
+
+ /*
+	 * Allocate one MSI vector for BHI plus, ideally, one vector per event
+	 * ring. No explicit pci_free_irq_vectors is required; that is done by
+	 * pcim_release.
+ */
+ mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
+
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ if (nr_vectors < 0) {
+ dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
+ nr_vectors);
+ return nr_vectors;
+ }
+
+ if (nr_vectors < mhi_cntrl->nr_irqs) {
+ dev_warn(&pdev->dev, "using shared MSI\n");
+
+ /* Patch msi vectors, use only one (shared) */
+ for (i = 0; i < mhi_cntrl_config->num_events; i++)
+ mhi_cntrl_config->event_cfg[i].irq = 0;
+ mhi_cntrl->nr_irqs = 1;
+ }
+
+ irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
+ int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
+
+ irq[i] = pci_irq_vector(pdev, vector);
+ }
+
+ mhi_cntrl->irq = irq;
+
+ return 0;
+}
+
+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ /* The runtime_get() MHI callback means:
+	 * Do whatever is required to leave M3.
+ */
+ return pm_runtime_get(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+ /* The runtime_put() MHI callback means:
+	 * The device can be moved to the M3 state.
+ */
+ pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
+ pm_runtime_put(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+ struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+ recovery_work);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ dev_warn(&pdev->dev, "device recovery started\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+ pm_runtime_forbid(&pdev->dev);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ if (!mhi_pci_is_alive(mhi_cntrl))
+ goto err_try_reset;
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err)
+ goto err_try_reset;
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err)
+ goto err_unprepare;
+
+ dev_dbg(&pdev->dev, "Recovery completed\n");
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ return;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+ if (pci_reset_function(pdev))
+ dev_err(&pdev->dev, "Recovery failed\n");
+}
+
+static void health_check(struct timer_list *t)
+{
+ struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return;
+
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ return;
+ }
+
+ /* reschedule in two seconds */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
+ const struct mhi_controller_config *mhi_cntrl_config;
+ struct mhi_pci_device *mhi_pdev;
+ struct mhi_controller *mhi_cntrl;
+ int err;
+
+ dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+
+ /* mhi_pdev.mhi_cntrl must be zero-initialized */
+ mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+ if (!mhi_pdev)
+ return -ENOMEM;
+
+ INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
+ mhi_cntrl_config = info->config;
+ mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ mhi_cntrl->cntrl_dev = &pdev->dev;
+ mhi_cntrl->iova_start = 0;
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->fw_image = info->fw;
+ mhi_cntrl->edl_image = info->edl;
+
+ mhi_cntrl->read_reg = mhi_pci_read_reg;
+ mhi_cntrl->write_reg = mhi_pci_write_reg;
+ mhi_cntrl->status_cb = mhi_pci_status_cb;
+ mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+ mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+ mhi_cntrl->mru = info->mru_default;
+
+ if (info->sideband_wake) {
+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+ }
+
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ if (err)
+ return err;
+
+ err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ return err;
+
+ pci_set_drvdata(pdev, mhi_pdev);
+
+	/* Keep the stored PCI config space at hand for restore after a sudden
+	 * PCI error: cache the state locally and discard the PCI core one.
+	 */
+ pci_save_state(pdev);
+ mhi_pdev->pci_state = pci_store_saved_state(pdev);
+ pci_load_saved_state(pdev, NULL);
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ goto err_disable_reporting;
+
+ /* MHI bus does not power up the controller by default */
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ goto err_unregister;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ goto err_unprepare;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+ /* start health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ /* Only allow runtime-suspend if PME capable (for wakeup) */
+ if (pci_pme_capable(pdev, PCI_D3hot)) {
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+
+ return 0;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_unregister:
+ mhi_unregister_controller(mhi_cntrl);
+err_disable_reporting:
+ pci_disable_pcie_error_reporting(pdev);
+
+ return err;
+}
+
+static void mhi_pci_remove(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer_sync(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+	/* Balance the put_noidle done in probe */
+ if (pci_pme_capable(pdev, PCI_D3hot))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ mhi_unregister_controller(mhi_cntrl);
+ pci_disable_pcie_error_reporting(pdev);
+}
+
+static void mhi_pci_shutdown(struct pci_dev *pdev)
+{
+ mhi_pci_remove(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_info(&pdev->dev, "reset\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* cause internal device reset */
+ mhi_soc_reset(mhi_cntrl);
+
+	/* Make sure the device reset has been executed */
+ msleep(MHI_POST_RESET_DELAY_MS);
+}
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ /* Restore initial known working PCI state */
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+	/* Is device status available? */
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(&pdev->dev, "reset failed\n");
+ return;
+ }
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ return;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ return;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ } else {
+ /* Nothing to do */
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+ dev_err(&pdev->dev, "PCI slot reset done\n");
+
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+ .error_detected = mhi_pci_error_detected,
+ .slot_reset = mhi_pci_slot_reset,
+ .resume = mhi_pci_io_resume,
+ .reset_prepare = mhi_pci_reset_prepare,
+ .reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ goto pci_suspend; /* Nothing to do at MHI level */
+
+ /* Transition to M3 state */
+ err = mhi_pm_suspend(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
+ clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
+ return -EBUSY;
+ }
+
+pci_suspend:
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_recovery;
+
+ pci_set_master(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ return 0; /* Nothing to do at MHI level */
+
+ /* Exit M3, transition to M0 state */
+ err = mhi_pm_resume(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+ goto err_recovery;
+ }
+
+ /* Resume health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ /* It can be a remote wakeup (no mhi runtime_get), update access time */
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+
+err_recovery:
+	/* Do not fail here, to avoid messing up the PCI device state; the
+	 * device likely lost power (d3cold) and simply needs to be reset from
+	 * the recovery procedure. Trigger the recovery asynchronously so that
+	 * system suspend exit is not delayed.
+	 */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+ pm_runtime_disable(dev);
+ return mhi_pci_runtime_suspend(dev);
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+ int ret;
+
+	/* Depending on the platform, the device may have lost power (d3cold);
+	 * we need to resume it now to check its state and recover when
+	 * necessary.
+	 */
+ ret = mhi_pci_runtime_resume(dev);
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	/* We want to stop all operations: hibernation does not guarantee that
+	 * the device will be in the same state as before freezing, especially
+	 * if the intermediate restore kernel reinitializes the MHI device with
+	 * a new context.
+	 */
+ flush_work(&mhi_pdev->recovery_work);
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+ /* Reinitialize the device */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+ SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mhi_pci_suspend,
+ .resume = mhi_pci_resume,
+ .freeze = mhi_pci_freeze,
+ .thaw = mhi_pci_restore,
+ .poweroff = mhi_pci_freeze,
+ .restore = mhi_pci_restore,
+#endif
+};
+
+static struct pci_driver mhi_pci_driver = {
+ .name = "mhi-pci-generic",
+ .id_table = mhi_pci_id_table,
+ .probe = mhi_pci_probe,
+ .remove = mhi_pci_remove,
+ .shutdown = mhi_pci_shutdown,
+ .err_handler = &mhi_pci_err_handler,
+ .driver.pm = &mhi_pci_pm_ops
+};
+module_pci_driver(mhi_pci_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
new file mode 100644
index 000000000000..4a42186ff111
--- /dev/null
+++ b/drivers/bus/mhi/host/pm.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. Transitions like Linkdown,
+ * SYS_ERR, and shutdown can happen at any time, asynchronously. The helper
+ * mhi_tryset_pm_state() below transitions to a new state only if the move is
+ * allowed.
+ *
+ * Priority increases as we go down. For instance, from any state in L0, the
+ * transition can be made to states in L1, L2 and L3. A notable exception to
+ * this rule is state DISABLE. From DISABLE state we can only transition to
+ * POR state. Also, while in the L2 state, the host cannot jump back to the
+ * previous L1 or L0 states.
+ *
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * POR <--> POR
+ * POR -> M0 -> M2 --> M0
+ * POR -> FW_DL_ERR
+ * FW_DL_ERR <--> FW_DL_ERR
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ * SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> DISABLE
+ */
+static const struct mhi_pm_transitions dev_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_FW_DL_ERR,
+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
+ },
+};
+
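+/*
+ * The lookup below relies on each MHI_PM_* state being a single bit whose
+ * position matches the state's row index in dev_state_transitions[]; the
+ * from_state field is double-checked before the allowed-transition mask is
+ * applied.
+ */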
+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state)
+{
+ unsigned long cur_state = mhi_cntrl->pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
+ return cur_state;
+
+ if (unlikely(dev_state_transitions[index].from_state != cur_state))
+ return cur_state;
+
+ if (unlikely(!(dev_state_transitions[index].to_states & state)))
+ return cur_state;
+
+ mhi_cntrl->pm_state = state;
+ return mhi_cntrl->pm_state;
+}
+
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ if (state == MHI_STATE_RESET) {
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 1);
+ } else {
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK, state);
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to set MHI state to: %s\n",
+ mhi_state_str(state));
+}
+
+/* NOP for backward compatibility, host allowed to ring DB in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* Handle device ready state transition */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ int ret, i;
+
+ /* Check if device entered error state */
+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device link is not accessible\n");
+ return -EIO;
+ }
+
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to clear MHI Reset\n");
+ return ret;
+ }
+
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_READY_MASK, 1, interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to enter MHI Ready\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Device in READY State\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ mhi_cntrl->dev_state = MHI_STATE_READY;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ return -EIO;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device registers not accessible\n");
+ goto error_mmio;
+ }
+
+ /* Configure MMIO registers */
+ ret = mhi_init_mmio(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Error configuring MMIO registers\n");
+ goto error_mmio;
+ }
+
+ /* Add elements to all SW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if this is an offload or HW event */
+ if (mhi_event->offload_ev || mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+ /* Update all cores */
+ smp_wmb();
+
+ /* Ring the event ring db */
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Set MHI to M0 state */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+error_mmio:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M0;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ dev_err(dev, "Unable to transition to M0 state\n");
+ return -EIO;
+ }
+ mhi_cntrl->M0++;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+
+ /* Ring all event rings and CMD ring only if we're in mission mode */
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct mhi_cmd *mhi_cmd =
+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Only ring primary cmd ring if ring is not empty */
+ spin_lock_irq(&mhi_cmd->lock);
+ if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ spin_unlock_irq(&mhi_cmd->lock);
+ }
+
+ /* Ring channel DB registers */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
+
+ /* Only ring DB if ring is not empty */
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp)
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to the M1 state, the host can move the device to the M2 state
+ * to keep it in a low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+ if (state == MHI_PM_M2) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+ mhi_cntrl->dev_state = MHI_STATE_M2;
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->M2++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* If there are any pending resources, exit M2 immediately */
+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_cntrl->dev_wake))) {
+ dev_dbg(dev,
+ "Exiting M2, pending_pkts: %d dev_wake: %d\n",
+ atomic_read(&mhi_cntrl->pending_pkts),
+ atomic_read(&mhi_cntrl->dev_wake));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ } else {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+ }
+ } else {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ }
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M3;
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (state != MHI_PM_M3) {
+ dev_err(dev, "Unable to transition to M3 state\n");
+ return -EIO;
+ }
+
+ mhi_cntrl->M3++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
+ int i, ret;
+
+ dev_dbg(dev, "Processing Mission Mode transition\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (!MHI_IN_MISSION_MODE(ee)) {
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ return -EIO;
+ }
+ mhi_cntrl->ee = ee;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+ mhi_destroy_device);
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+ /* Force MHI to be in M0 state before continuing */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto error_mission_mode;
+ }
+
+ /* Add elements to all HW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev || !mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+ /* Update to all cores */
+ smp_wmb();
+
+ spin_lock_irq(&mhi_event->lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /*
+ * The MHI devices are only created when the client device switches its
+ * Execution Environment (EE) to either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+/* Handle shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Processing disable transition with PM state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to clear MHI Reset\n");
+
+ /*
+ * Device will clear BHI_INTVEC as a part of RESET processing,
+ * hence re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+ if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ /* wait for ready to be set */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+ MHISTATUS,
+ MHISTATUS_READY_MASK, 1, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to enter READY state\n");
+ }
+ }
+
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ /* Move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ dev_err(dev, "Error moving from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Handle system error transitions */
+static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state, prev_state;
+ enum dev_st_transition next_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
+
+ /* We must notify the MHI controller driver so it can clean up first */
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
+ dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
+ goto exit_sys_error_transition;
+ }
+
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+ /* Wake up threads waiting for state transition */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ &in_reset) ||
+ !in_reset, timeout);
+ if (!ret || in_reset) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+ goto exit_sys_error_transition;
+ }
+
+ /*
+ * Device will clear BHI_INTVEC as a part of RESET processing,
+ * hence re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ /* Transition to next state */
+ if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ goto exit_sys_error_transition;
+ }
+ next_state = DEV_ST_TRANSITION_PBL;
+ } else {
+ next_state = DEV_ST_TRANSITION_READY;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+exit_sys_error_transition:
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+/* SYS_ERR worker */
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
+}
+
+/* Device State Transition worker */
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling state transition: %s\n",
+ TO_DEV_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case DEV_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mhi_fw_load_handler(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /*
+ * The MHI devices are only created when the client
+ * device switches its Execution Environment (EE) to
+ * either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+ if (mhi_cntrl->fbc_download)
+ mhi_download_amss_image(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_FP:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_FP;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_sys_error_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition(mhi_cntrl);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
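
State transitions are queued with GFP_ATOMIC so that interrupt and event handlers can request them without sleeping; the worker above then drains the list in FIFO order in process context. A hedged sketch of a producer (the real producers live in the event-processing paths outside this file; example_* is a hypothetical name):

    /* Hedged sketch: requesting a transition from atomic (event handler) context */
    static void example_on_amss_entry(struct mhi_controller *mhi_cntrl)
    {
            if (mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_MISSION_MODE))
                    dev_err(&mhi_cntrl->mhi_dev->dev,
                            "Failed to queue mission mode transition\n");
    }
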
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Could not enter M0/M1 state\n");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_dbg(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Waiting for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ mhi_state_str(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
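
mhi_pm_suspend() and the mhi_pm_resume() variants below are intended to be called from the controller driver's own PM hooks. A hedged sketch of that wiring (the dev_get_drvdata() layout and the example_* names are assumptions, not part of this patch):

    /* Hedged sketch: hooking MHI suspend/resume into dev_pm_ops */
    static int example_cntrl_suspend(struct device *dev)
    {
            struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); /* assumed */

            return mhi_pm_suspend(mhi_cntrl);
    }

    static int example_cntrl_resume(struct device *dev)
    {
            struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); /* assumed */

            return mhi_pm_resume(mhi_cntrl);
    }

    static const struct dev_pm_ops example_cntrl_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(example_cntrl_suspend, example_cntrl_resume)
    };
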
+
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+ dev_warn(dev, "Resuming from non M3 state (%s)\n",
+ mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
+ if (!force)
+ return -EINVAL;
+ }
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M2 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ mhi_state_str(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, false);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Assert device wake db */
+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /*
+ * If force flag is set, then increment the wake count value and
+ * ring wake db
+ */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /*
+ * If resources are already requested, then just increment
+ * the wake count value and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* De-assert device wake db */
+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
+ bool override)
+{
+ unsigned long flags;
+
+ /*
+ * Only continue if there is a single resource, else just decrement
+ * and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
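
These two helpers implement a device-wake reference count: the wake doorbell is only written on the 0->1 and 1->0 edges of dev_wake, and only in PM states where doorbell access is valid. Client drivers do not call them directly; they take and drop votes through mhi_device_get_sync()/mhi_device_put(), exported near the end of this file. A hedged sketch of a balanced vote around an I/O burst (the example_* names and the transfer call are placeholders):

    /* Hedged sketch: client-side device-wake vote held across an I/O burst */
    static int example_client_io(struct mhi_device *mhi_dev)
    {
            int ret;

            ret = mhi_device_get_sync(mhi_dev);   /* 0 -> 1 edge asserts wake */
            if (ret)
                    return ret;

            ret = example_do_transfers(mhi_dev);  /* placeholder for real queueing */

            mhi_device_put(mhi_dev);              /* 1 -> 0 edge releases wake */
            return ret;
    }
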
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ enum mhi_state state;
+ enum mhi_ee_type current_ee;
+ enum dev_st_transition next_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ int ret, i;
+
+ dev_info(dev, "Requested to power ON\n");
+
+ /* Supply default wake routines if not provided by controller driver */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ /* Setup BHI INTVEC */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Confirm that the device is in valid exec env */
+ if (!MHI_POWER_UP_CAPABLE(current_ee)) {
+ dev_err(dev, "%s is not a valid EE for power on\n",
+ TO_MHI_EXEC_STR(current_ee));
+ ret = -EIO;
+ goto error_exit;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
+ TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
+
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, interval_us);
+ if (ret) {
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_exit;
+ }
+
+ /*
+ * device clears INTVEC as part of RESET processing,
+ * re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ /* IRQs have been requested during probe, so we just need to enable them. */
+ enable_irq(mhi_cntrl->irq[0]);
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ enable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ }
+
+ /* Transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ dev_info(dev, "Power on setup success\n");
+
+ return 0;
+
+error_exit:
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum mhi_pm_state cur_state, transition_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_cntrl->pm_state;
+ if (cur_state == MHI_PM_DISABLE) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return; /* Already powered down */
+ }
+
+ /* If it's not a graceful shutdown, force MHI to linkdown state */
+ transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
+ MHI_PM_LD_ERR_FATAL_DETECT;
+
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state != transition_state) {
+ dev_err(dev, "Failed to move to state: %s from: %s\n",
+ to_mhi_pm_state_str(transition_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ /* Force link down or error fatal detected state */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ }
+
+ /* mark device inactive to avoid any further host processing */
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
+ disable_irq(mhi_cntrl->irq[0]);
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+ if (ret)
+ mhi_power_down(mhi_cntrl, false);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake++;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (!ret)
+ mhi_dev->dev_wake++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake--;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);
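
Taken together, the exported entry points in this file give a controller driver a simple lifecycle: power up (asynchronously or synchronously), let the state-transition worker walk the device to mission mode, and power down on removal or fatal error. A hedged sketch of that lifecycle (the example_* framing is hypothetical; a real driver would have registered the controller first):

    /* Hedged sketch: controller lifecycle built on this file's exported API */
    static int example_cntrl_start(struct mhi_controller *mhi_cntrl)
    {
            /* Blocks until mission mode or timeout; powers back down on failure */
            return mhi_sync_power_up(mhi_cntrl);
    }

    static void example_cntrl_stop(struct mhi_controller *mhi_cntrl, bool graceful)
    {
            /* graceful = false forces the link-down/fatal-error path */
            mhi_power_down(mhi_cntrl, graceful);
    }
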
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 1b14256376d2..fca0d0669aa9 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -13,6 +13,8 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -337,9 +339,23 @@ static phys_addr_t mips_cdmm_cur_base(void)
* Picking a suitable physical address at which to map the CDMM region is
* platform specific, so this weak function can be overridden by platform
* code to pick a suitable value if none is configured by the bootloader.
+ * By default this function tries to find a CDMM-specific node in the system
+ * dtb. Note that this won't work for the early serial console.
*/
phys_addr_t __weak mips_cdmm_phys_base(void)
{
+ struct device_node *np;
+ struct resource res;
+ int err;
+
+ np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
+ if (np) {
+ err = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (!err)
+ return res.start;
+ }
+
return 0;
}
@@ -544,10 +560,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
++id;
ret = device_register(&dev->dev);
- if (ret) {
+ if (ret)
put_device(&dev->dev);
- kfree(dev);
- }
}
}
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index b20fdcbd035b..5eb0fe73ddc4 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -2,7 +2,7 @@
/*
* Turris Mox module configuration bus driver
*
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/
#include <dt-bindings/bus/moxtet.h>
@@ -815,7 +815,7 @@ static int moxtet_probe(struct spi_device *spi)
return 0;
}
-static int moxtet_remove(struct spi_device *spi)
+static void moxtet_remove(struct spi_device *spi)
{
struct moxtet *moxtet = spi_get_drvdata(spi);
@@ -828,8 +828,6 @@ static int moxtet_remove(struct spi_device *spi)
device_for_each_child(moxtet->dev, NULL, __unregister);
mutex_destroy(&moxtet->lock);
-
- return 0;
}
static const struct of_device_id moxtet_dt_ids[] = {
@@ -879,6 +877,6 @@ static void __exit moxtet_exit(void)
}
module_exit(moxtet_exit);
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behún <kabel@kernel.org>");
MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 5b2a11a88951..d51573ac525e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Address map functions for Marvell EBU SoCs (Kirkwood, Armada
* 370/XP, Dove, Orion5x and MV78xx0)
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* The Marvell EBU SoCs have a configurable physical address space:
* the physical address at which certain devices (PCIe, NOR, NAND,
* etc.) sit can be configured. The configuration takes place through
@@ -25,8 +22,8 @@
*
* - Reads out the SDRAM address decoding windows at initialization
* time, and fills the mvebu_mbus_dram_info structure with these
- * informations. The exported function mv_mbus_dram_info() allow
- * device drivers to get those informations related to the SDRAM
+ * information. The exported function mv_mbus_dram_info() allows
+ * device drivers to get the information related to the SDRAM
* address decoding windows. This is because devices also have their
* own windows (configured through registers that are part of each
* device register space), and therefore the drivers for Marvell
@@ -123,7 +120,7 @@ struct mvebu_mbus_soc_data {
};
/*
- * Used to store the state of one MBus window accross suspend/resume.
+ * Used to store the state of one MBus window across suspend/resume.
*/
struct mvebu_mbus_win_data {
u32 ctrl;
@@ -469,18 +466,7 @@ static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
struct mvebu_mbus_state *mbus = &mbus_state;
return mbus->soc->show_cpu_target(mbus, seq, v);
}
-
-static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_sdram_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_sdram_debug_fops = {
- .open = mvebu_sdram_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_sdram_debug);
static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
{
@@ -519,18 +505,7 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_devs_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_devs_debug_fops = {
- .open = mvebu_devs_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_devs_debug);
/*
* SoC-specific functions and definitions
@@ -610,23 +585,23 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
static void __init
mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
{
- struct memblock_region *r;
- uint64_t s = 0;
+ phys_addr_t reg_start, reg_end;
+ uint64_t i, s = 0;
- for_each_memblock(memory, r) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
/*
* This part of the memory is above 4 GB, so we don't
* care for the MBus bridge hole.
*/
- if (r->base >= 0x100000000ULL)
+ if ((u64)reg_start >= 0x100000000ULL)
continue;
/*
* The MBus bridge hole is at the end of the RAM under
* the 4 GB limit.
*/
- if (r->base + r->size > s)
- s = r->base + r->size;
+ if (reg_end > s)
+ s = reg_end;
}
*start = s;
@@ -914,6 +889,7 @@ int mvebu_mbus_add_window_remap_by_id(unsigned int target,
return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id);
int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size)
@@ -921,6 +897,7 @@ int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
size, MVEBU_MBUS_NO_REMAP);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id);
int mvebu_mbus_del_window(phys_addr_t base, size_t size)
{
@@ -933,6 +910,7 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size)
mvebu_mbus_disable_window(&mbus_state, win);
return 0;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_del_window);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
{
@@ -940,6 +918,7 @@ void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
return;
*res = mbus_state.pcie_mem_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
{
@@ -947,6 +926,7 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
return;
*res = mbus_state.pcie_io_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
{
@@ -1111,7 +1091,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
if (!mbus->sdramwins_base) {
- iounmap(mbus_state.mbuswins_base);
+ iounmap(mbus->mbuswins_base);
return -ENOMEM;
}
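
The debugfs hunks above replace hand-rolled single_open() boilerplate with DEFINE_SHOW_ATTRIBUTE(); for a given NAME the macro (from <linux/seq_file.h>) generates roughly the open wrapper and file_operations that were deleted. The sketch below is an approximation of the expansion, not a verbatim copy of the header:

    /* Hedged sketch: approximate expansion of DEFINE_SHOW_ATTRIBUTE(NAME) */
    static int NAME_open(struct inode *inode, struct file *file)
    {
            return single_open(file, NAME_show, inode->i_private);
    }

    static const struct file_operations NAME_fops = {
            .owner   = THIS_MODULE,
            .open    = NAME_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
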
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index b040447575ad..eb1ba6319fda 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* OMAP L3 Interconnect error handling driver
*
* Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -285,7 +277,7 @@ static int omap_l3_probe(struct platform_device *pdev)
*/
l3->debug_irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
- 0x0, "l3-dbg-irq", l3);
+ IRQF_NO_THREAD, "l3-dbg-irq", l3);
if (ret) {
dev_err(l3->dev, "request_irq failed for %d\n",
l3->debug_irq);
@@ -294,7 +286,7 @@ static int omap_l3_probe(struct platform_device *pdev)
l3->app_irq = platform_get_irq(pdev, 1);
ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
- 0x0, "l3-app-irq", l3);
+ IRQF_NO_THREAD, "l3-app-irq", l3);
if (ret)
dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 73431f81da28..bb3eebd3465d 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP L3 Interconnect error handling driver header
*
* Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __OMAP_L3_NOC_H
#define __OMAP_L3_NOC_H
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
index 03ddcf426887..663c82749222 100644
--- a/drivers/bus/qcom-ebi2.c
+++ b/drivers/bus/qcom-ebi2.c
@@ -102,8 +102,8 @@
/**
* struct cs_data - struct with info on a chipselect setting
* @enable_mask: mask to enable the chipselect in the EBI2 config
- * @slow_cfg0: offset to XMEMC slow CS config
- * @fast_cfg1: offset to XMEMC fast CS config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
*/
struct cs_data {
u32 enable_mask;
@@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
/* Figure out the chipselect */
ret = of_property_read_u32(child, "reg", &csindex);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
if (csindex > 5) {
dev_err(dev,
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
new file mode 100644
index 000000000000..eedeb29a5ff3
--- /dev/null
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021, Michael Srba
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD BIT(4)
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL BIT(5)
+
+static const char *const qcom_ssc_block_pd_names[] = {
+ "ssc_cx",
+ "ssc_mx"
+};
+
+struct qcom_ssc_block_bus_data {
+ const char *const *pd_names;
+ struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)];
+ char __iomem *reg_mpm_sscaon_config0;
+ char __iomem *reg_mpm_sscaon_config1;
+ struct regmap *halt_map;
+ struct clk *xo_clk;
+ struct clk *aggre2_clk;
+ struct clk *gcc_im_sleep_clk;
+ struct clk *aggre2_north_clk;
+ struct clk *ssc_xo_clk;
+ struct clk *ssc_ahbs_clk;
+ struct reset_control *ssc_bcr;
+ struct reset_control *ssc_reset;
+ u32 ssc_axi_halt;
+ int num_pds;
+};
+
+static void reg32_set_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp | value, reg);
+}
+
+static void reg32_clear_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp & (~value), reg);
+}
+
+static int qcom_ssc_block_bus_init(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(data->xo_clk);
+ if (ret) {
+ dev_err(dev, "error enabling xo_clk: %d\n", ret);
+ goto err_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->aggre2_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_clk: %d\n", ret);
+ goto err_aggre2_clk;
+ }
+
+ ret = clk_prepare_enable(data->gcc_im_sleep_clk);
+ if (ret) {
+ dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret);
+ goto err_gcc_im_sleep_clk;
+ }
+
+ /*
+ * We need to intervene here because the HW logic driving these signals cannot handle
+ * initialization after power collapse by itself.
+ */
+ reg32_clear_bits(data->reg_mpm_sscaon_config0,
+ SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
+ /* override few_ack/rest_ack */
+ reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ ret = clk_prepare_enable(data->aggre2_north_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret);
+ goto err_aggre2_north_clk;
+ }
+
+ ret = reset_control_deassert(data->ssc_reset);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_reset: %d\n", ret);
+ goto err_ssc_reset;
+ }
+
+ ret = reset_control_deassert(data->ssc_bcr);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_bcr: %d\n", ret);
+ goto err_ssc_bcr;
+ }
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0);
+
+ ret = clk_prepare_enable(data->ssc_xo_clk);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_xo_clk: %d\n", ret);
+ goto err_ssc_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->ssc_ahbs_clk);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_ahbs_clk: %d\n", ret);
+ goto err_ssc_ahbs_clk;
+ }
+
+ return 0;
+
+err_ssc_ahbs_clk:
+ clk_disable(data->ssc_xo_clk);
+
+err_ssc_xo_clk:
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reset_control_assert(data->ssc_bcr);
+
+err_ssc_bcr:
+ reset_control_assert(data->ssc_reset);
+
+err_ssc_reset:
+ clk_disable(data->aggre2_north_clk);
+
+err_aggre2_north_clk:
+ reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5));
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+err_gcc_im_sleep_clk:
+ clk_disable(data->aggre2_clk);
+
+err_aggre2_clk:
+ clk_disable(data->xo_clk);
+
+err_xo_clk:
+ return ret;
+}
+
+static void qcom_ssc_block_bus_deinit(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ clk_disable(data->ssc_xo_clk);
+ clk_disable(data->ssc_ahbs_clk);
+
+ ret = reset_control_assert(data->ssc_bcr);
+ if (ret)
+ dev_err(dev, "error asserting ssc_bcr: %d\n", ret);
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+ reg32_set_bits(data->reg_mpm_sscaon_config0, BIT(4) | BIT(5));
+
+ ret = reset_control_assert(data->ssc_reset);
+ if (ret)
+ dev_err(dev, "error asserting ssc_reset: %d\n", ret);
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+ clk_disable(data->aggre2_north_clk);
+
+ clk_disable(data->aggre2_clk);
+ clk_disable(data->xo_clk);
+}
+
+static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds,
+ const char *const *pd_names, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(pds[i])) {
+ ret = PTR_ERR(pds[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(pds[i], false);
+
+ return ret;
+}
+
+static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data;
+ struct device_node *np = pdev->dev.of_node;
+ struct of_phandle_args halt_args;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ /* power domains */
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+
+ /* low level overrides for when the HW logic doesn't "just work" */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
+ data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config0))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0),
+ "Failed to ioremap mpm_sscaon_config0\n");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1");
+ data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1),
+ "Failed to ioremap mpm_sscaon_config1\n");
+
+ /* resets */
+ data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr");
+ if (IS_ERR(data->ssc_bcr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr),
+ "Failed to acquire reset: scc_bcr\n");
+
+ data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset");
+ if (IS_ERR(data->ssc_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset),
+ "Failed to acquire reset: ssc_reset:\n");
+
+ /* clocks */
+ data->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(data->xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk),
+ "Failed to get clock: xo\n");
+
+ data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2");
+ if (IS_ERR(data->aggre2_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk),
+ "Failed to get clock: aggre2\n");
+
+ data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep");
+ if (IS_ERR(data->gcc_im_sleep_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk),
+ "Failed to get clock: gcc_im_sleep\n");
+
+ data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north");
+ if (IS_ERR(data->aggre2_north_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk),
+ "Failed to get clock: aggre2_north\n");
+
+ data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo");
+ if (IS_ERR(data->ssc_xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk),
+ "Failed to get clock: ssc_xo\n");
+
+ data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs");
+ if (IS_ERR(data->ssc_ahbs_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk),
+ "Failed to get clock: ssc_ahbs\n");
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0,
+ &halt_args);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n");
+
+ data->halt_map = syscon_node_to_regmap(halt_args.np);
+ of_node_put(halt_args.np);
+ if (IS_ERR(data->halt_map))
+ return PTR_ERR(data->halt_map);
+
+ data->ssc_axi_halt = halt_args.args[0];
+
+ qcom_ssc_block_bus_init(&pdev->dev);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);
+
+ qcom_ssc_block_bus_deinit(&pdev->dev);
+
+ iounmap(data->reg_mpm_sscaon_config0);
+ iounmap(data->reg_mpm_sscaon_config1);
+
+ qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+ pm_runtime_disable(&pdev->dev);
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
+ { .compatible = "qcom,ssc-block-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
+
+static struct platform_driver qcom_ssc_block_bus_driver = {
+ .probe = qcom_ssc_block_bus_probe,
+ .remove = qcom_ssc_block_bus_remove,
+ .driver = {
+ .name = "qcom-ssc-block-bus",
+ .of_match_table = qcom_ssc_block_bus_of_match,
+ },
+};
+
+module_platform_driver(qcom_ssc_block_bus_driver);
+
+MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index c5eb46cbf388..6b8d6257ed8a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -13,31 +13,68 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
static int simple_pm_bus_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ const struct device *dev = &pdev->dev;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+
+ /*
+ * Allow user to use driver_override to bind this driver to a
+ * transparent bus device which has a different compatible string
+ * that's not listed in simple_pm_bus_of_match. We don't want to do any
+ * of the simple-pm-bus tasks for these devices, so return early.
+ */
+ if (pdev->driver_override)
+ return 0;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ /*
+ * These are transparent bus devices (not simple-pm-bus matches) that
+ * have their child nodes populated automatically, so we don't need to
+ * do anything more. We only match with the device if this driver is
+ * the most specific match because we don't want to incorrectly bind to
+ * a device that has a more specific driver.
+ */
+ if (match && match->data) {
+ if (of_property_match_string(np, "compatible", match->compatible) == 0)
+ return 0;
+ else
+ return -ENODEV;
+ }
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
if (np)
- of_platform_populate(np, NULL, NULL, &pdev->dev);
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
return 0;
}
static int simple_pm_bus_remove(struct platform_device *pdev)
{
+ const void *data = of_device_get_match_data(&pdev->dev);
+
+ if (pdev->driver_override || data)
+ return 0;
+
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_disable(&pdev->dev);
return 0;
}
+#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
+
static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-pm-bus", },
+ { .compatible = "simple-bus", .data = ONLY_BUS },
+ { .compatible = "simple-mfd", .data = ONLY_BUS },
+ { .compatible = "isa", .data = ONLY_BUS },
+ { .compatible = "arm,amba-bus", .data = ONLY_BUS },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
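The ONLY_BUS entries added above use the of_device_id .data field purely as a marker: when the matched entry carries it, probe bails out early instead of doing any of the simple-pm-bus work. A rough sketch of that match-data trick, with a hypothetical vendor,foo-pm-bus compatible and FOO_ONLY_BUS marker:

    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    #define FOO_ONLY_BUS   ((void *)1)   /* hypothetical marker, like ONLY_BUS above */

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-pm-bus" },                 /* fully handled */
            { .compatible = "simple-bus", .data = FOO_ONLY_BUS },  /* bind only */
            { /* sentinel */ }
    };

    static int foo_probe(struct platform_device *pdev)
    {
            const struct of_device_id *match;

            match = of_match_device(foo_of_match, &pdev->dev);
            if (match && match->data)
                    return 0;       /* transparent bus: no PM handling wanted */

            /* ... the usual simple-pm-bus style work for the first entry ... */
            return 0;
    }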
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 672518741f86..414f29cdedf0 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -15,10 +15,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev)
int ret;
ret = sunxi_sram_claim(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't map SRAM to device\n");
of_platform_populate(np, NULL, NULL, &pdev->dev);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index be79d6c6a4e4..4cd2e127946e 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* RSB (Reduced Serial Bus) driver.
*
* Author: Chen-Yu Tsai <wens@csie.org>
*
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
* The RSB controller looks like an SMBus controller which only supports
* byte and word data transfers. But, it differs from standard SMBus
* protocol on several aspects:
@@ -31,7 +28,6 @@
* This document is officially released by Allwinner.
*
* This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
- *
*/
#include <linux/clk.h>
@@ -45,6 +41,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -126,6 +124,7 @@ struct sunxi_rsb {
struct completion complete;
struct mutex lock;
unsigned int status;
+ u32 clk_freq;
};
/* bus / slave device related functions */
@@ -166,11 +165,11 @@ static int sunxi_rsb_device_probe(struct device *dev)
return drv->probe(rdev);
}
-static int sunxi_rsb_device_remove(struct device *dev)
+static void sunxi_rsb_device_remove(struct device *dev)
{
const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
- return drv->remove(to_sunxi_rsb_device(dev));
+ drv->remove(to_sunxi_rsb_device(dev));
}
static struct bus_type sunxi_rsb_bus = {
@@ -224,6 +223,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+ return rdev;
+
err_device_add:
put_device(&rdev->dev);
@@ -335,6 +336,10 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -345,11 +350,14 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
if (ret)
goto unlock;
- *buf = readl(rsb->regs + RSB_DATA);
+ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
unlock:
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -377,6 +385,10 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&rsb->lock);
writel(addr, rsb->regs + RSB_ADDR);
@@ -387,6 +399,9 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
return ret;
}
@@ -614,11 +629,100 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
return 0;
}
-static const struct of_device_id sunxi_rsb_of_match_table[] = {
- { .compatible = "allwinner,sun8i-a23-rsb" },
- {}
-};
-MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ unsigned long p_clk_freq;
+ u32 clk_delay, reg;
+ int clk_div, ret;
+
+ ret = clk_prepare_enable(rsb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(rsb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ /* reset the controller */
+ writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+ readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+ !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+ /*
+ * Clock frequency and delay calculation code is from
+ * Allwinner U-boot sources.
+ *
+ * From A83 user manual:
+ * bus clock frequency = parent clock frequency / (2 * (divider + 1))
+ */
+ p_clk_freq = clk_get_rate(rsb->clk);
+ clk_div = p_clk_freq / rsb->clk_freq / 2;
+ if (!clk_div)
+ clk_div = 1;
+ else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+ clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+ clk_delay = clk_div >> 1;
+ if (!clk_delay)
+ clk_delay = 1;
+
+ dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+ writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+ rsb->regs + RSB_CCR);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(rsb->clk);
+
+ return ret;
+}
+
+static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
+{
+ reset_control_assert(rsb->rstc);
+
+ /* Keep the clock and PM reference counts consistent. */
+ if (!pm_runtime_status_suspended(rsb->dev))
+ clk_disable_unprepare(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rsb->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_runtime_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ sunxi_rsb_hw_exit(rsb);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return sunxi_rsb_hw_init(rsb);
+}
static int sunxi_rsb_probe(struct platform_device *pdev)
{
@@ -626,10 +730,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct resource *r;
struct sunxi_rsb *rsb;
- unsigned long p_clk_freq;
- u32 clk_delay, clk_freq = 3000000;
- int clk_div, irq, ret;
- u32 reg;
+ u32 clk_freq = 3000000;
+ int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
if (clk_freq > RSB_MAX_FREQ) {
@@ -644,6 +746,7 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return -ENOMEM;
rsb->dev = dev;
+ rsb->clk_freq = clk_freq;
platform_set_drvdata(pdev, rsb);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rsb->regs = devm_ioremap_resource(dev, r);
@@ -661,79 +764,41 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_prepare_enable(rsb->clk);
- if (ret) {
- dev_err(dev, "failed to enable clk: %d\n", ret);
- return ret;
- }
-
- p_clk_freq = clk_get_rate(rsb->clk);
-
rsb->rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rsb->rstc)) {
ret = PTR_ERR(rsb->rstc);
dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- goto err_clk_disable;
- }
-
- ret = reset_control_deassert(rsb->rstc);
- if (ret) {
- dev_err(dev, "failed to deassert reset line: %d\n", ret);
- goto err_clk_disable;
+ return ret;
}
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
- /* reset the controller */
- writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
- readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
- !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
-
- /*
- * Clock frequency and delay calculation code is from
- * Allwinner U-boot sources.
- *
- * From A83 user manual:
- * bus clock frequency = parent clock frequency / (2 * (divider + 1))
- */
- clk_div = p_clk_freq / clk_freq / 2;
- if (!clk_div)
- clk_div = 1;
- else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
- clk_div = RSB_CCR_MAX_CLK_DIV + 1;
-
- clk_delay = clk_div >> 1;
- if (!clk_delay)
- clk_delay = 1;
-
- dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
- writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
- rsb->regs + RSB_CCR);
-
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
if (ret) {
dev_err(dev, "can't register interrupt handler irq %d: %d\n",
irq, ret);
- goto err_reset_assert;
+ return ret;
}
+ ret = sunxi_rsb_hw_init(rsb);
+ if (ret)
+ return ret;
+
/* initialize all devices on the bus into RSB mode */
ret = sunxi_rsb_init_device_mode(rsb);
if (ret)
dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
of_rsb_register_devices(rsb);
return 0;
-
-err_reset_assert:
- reset_control_assert(rsb->rstc);
-
-err_clk_disable:
- clk_disable_unprepare(rsb->clk);
-
- return ret;
}
static int sunxi_rsb_remove(struct platform_device *pdev)
@@ -741,18 +806,40 @@ static int sunxi_rsb_remove(struct platform_device *pdev)
struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
- reset_control_assert(rsb->rstc);
- clk_disable_unprepare(rsb->clk);
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
return 0;
}
+static void sunxi_rsb_shutdown(struct platform_device *pdev)
+{
+ struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
+}
+
+static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
+ sunxi_rsb_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sunxi_rsb_suspend, sunxi_rsb_resume)
+};
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a23-rsb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
.remove = sunxi_rsb_remove,
+ .shutdown = sunxi_rsb_shutdown,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
+ .pm = &sunxi_rsb_dev_pm_ops,
},
};
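The sunxi-rsb changes above wrap each transfer in a runtime PM get/put pair with autosuspend, so the controller clock only stays on while transfers are in flight. A stripped-down sketch of that consumer-side pattern (foo_* names are hypothetical):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct foo_ctrl {
            struct device *dev;
    };

    static int foo_do_transfer(struct foo_ctrl *ctrl)
    {
            return 0;       /* hypothetical hardware access */
    }

    /* Keep the controller powered only while a transfer is in flight. */
    static int foo_xfer(struct foo_ctrl *ctrl)
    {
            int ret;

            ret = pm_runtime_resume_and_get(ctrl->dev);
            if (ret)
                    return ret;

            ret = foo_do_transfer(ctrl);

            pm_runtime_mark_last_busy(ctrl->dev);   /* restart the autosuspend timer */
            pm_runtime_put_autosuspend(ctrl->dev);  /* suspend after the delay expires */

            return ret;
    }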
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index a6570789f7af..662266719682 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for NVIDIA Generic Memory Interface
*
* Copyright (C) 2016 Host Mobility AB. All rights reserved.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -13,8 +10,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define TEGRA_GMI_CONFIG 0x00
#define TEGRA_GMI_CONFIG_GO BIT(31)
#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
@@ -54,9 +54,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi)
{
int err;
- err = clk_prepare_enable(gmi->clk);
- if (err < 0) {
- dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_enable(gmi->dev);
+ err = pm_runtime_resume_and_get(gmi->dev);
+ if (err) {
+ pm_runtime_disable(gmi->dev);
return err;
}
@@ -83,7 +84,9 @@ static void tegra_gmi_disable(struct tegra_gmi *gmi)
writel(config, gmi->base + TEGRA_GMI_CONFIG);
reset_control_assert(gmi->rst);
- clk_disable_unprepare(gmi->clk);
+
+ pm_runtime_put_sync_suspend(gmi->dev);
+ pm_runtime_force_suspend(gmi->dev);
}
static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
@@ -213,6 +216,7 @@ static int tegra_gmi_probe(struct platform_device *pdev)
if (!gmi)
return -ENOMEM;
+ platform_set_drvdata(pdev, gmi);
gmi->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -232,6 +236,10 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return PTR_ERR(gmi->rst);
}
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
err = tegra_gmi_parse_dt(gmi);
if (err)
return err;
@@ -247,8 +255,6 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, gmi);
-
return 0;
}
@@ -262,6 +268,34 @@ static int tegra_gmi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gmi->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_gmi_pm = {
+ SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_gmi_id_table[] = {
{ .compatible = "nvidia,tegra20-gmi", },
{ .compatible = "nvidia,tegra30-gmi", },
@@ -275,6 +309,7 @@ static struct platform_driver tegra_gmi_driver = {
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
+ .pm = &tegra_gmi_pm,
},
};
module_platform_driver(tegra_gmi_driver);
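On the provider side, the tegra-gmi change moves clock gating into runtime PM callbacks so the core can enable and disable the clock on demand. A generic sketch of such dev_pm_ops wiring (hypothetical foo_* names):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    struct foo_gmi {
            struct clk *clk;
    };

    static int __maybe_unused foo_runtime_suspend(struct device *dev)
    {
            struct foo_gmi *gmi = dev_get_drvdata(dev);

            clk_disable_unprepare(gmi->clk);        /* gate the bus clock while idle */

            return 0;
    }

    static int __maybe_unused foo_runtime_resume(struct device *dev)
    {
            struct foo_gmi *gmi = dev_get_drvdata(dev);

            return clk_prepare_enable(gmi->clk);
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };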
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 6113fc0a52ae..9a7d12332fad 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -6,7 +6,9 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -15,15 +17,55 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
+#define DIS_ISP BIT(2)
+#define DIS_IVA BIT(1)
+#define DIS_SGX BIT(0)
+
+#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
+
#define MAX_MODULE_SOFTRESET_WAIT 10000
-static const char * const reg_names[] = { "rev", "sysc", "syss", };
+enum sysc_soc {
+ SOC_UNKNOWN,
+ SOC_2420,
+ SOC_2430,
+ SOC_3430,
+ SOC_3630,
+ SOC_4430,
+ SOC_4460,
+ SOC_4470,
+ SOC_5430,
+ SOC_AM3,
+ SOC_AM4,
+ SOC_DRA7,
+};
+
+struct sysc_address {
+ unsigned long base;
+ struct list_head node;
+};
+
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
+struct sysc_soc_info {
+ unsigned long general_purpose:1;
+ enum sysc_soc soc;
+ struct mutex list_lock; /* disabled and restored modules list lock */
+ struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
+};
enum sysc_clocks {
SYSC_FCK,
@@ -39,6 +81,8 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
+static struct sysc_soc_info *sysc_soc;
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
"opt5", "opt6", "opt7",
@@ -65,16 +109,19 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @cookie: data used by legacy platform callbacks
* @name: name if available
* @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
* @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
- * @clk_enable_quirk: module specific clock enable quirk
- * @clk_disable_quirk: module specific clock disable quirk
+ * @pre_reset_quirk: module specific pre-reset quirk
+ * @post_reset_quirk: module specific post-reset quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
* @module_disable_quirk: module specific disable quirk
+ * @module_unlock_quirk: module specific sysconfig unlock quirk
+ * @module_lock_quirk: module specific sysconfig lock quirk
*/
struct sysc {
struct device *dev;
@@ -93,19 +140,24 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ u32 sysconfig;
+ unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
struct delayed_work idle_work;
- void (*clk_enable_quirk)(struct sysc *sysc);
- void (*clk_disable_quirk)(struct sysc *sysc);
+ void (*pre_reset_quirk)(struct sysc *sysc);
+ void (*post_reset_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
void (*module_disable_quirk)(struct sysc *sysc);
+ void (*module_unlock_quirk)(struct sysc *sysc);
+ void (*module_lock_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
+static int sysc_reset(struct sysc *ddata);
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
@@ -182,6 +234,77 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
+{
+ int error, retries;
+ u32 syss_done, rstval;
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+ rstval, (rstval & ddata->cfg.syss_mask) ==
+ syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+ rstval, !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -218,7 +341,7 @@ static int sysc_add_named_clock_from_child(struct sysc *ddata,
* limit for clk_get(). If cl ever needs to be freed, it should be done
* with clkdev_drop().
*/
- cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return -ENOMEM;
@@ -565,6 +688,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
+/* Interconnect instances to probe before l4_per instances */
+static struct resource early_bus_ranges[] = {
+ /* am3/4 l4_wkup */
+ { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
+ /* omap4/5 and dra7 l4_cfg */
+ { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
+ /* omap4 l4_wkup */
+ { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
+ /* omap5 and dra7 l4_wkup without dra7 dcan segment */
+ { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
+};
+
+static atomic_t sysc_defer = ATOMIC_INIT(10);
+
+/**
+ * sysc_defer_non_critical - defer non-critical interconnect probing
+ * @ddata: device driver data
+ *
+ * We want to probe l4_cfg and l4_wkup interconnect instances before any
+ * l4_per instances as l4_per instances depend on resources on l4_cfg and
+ * l4_wkup interconnects.
+ */
+static int sysc_defer_non_critical(struct sysc *ddata)
+{
+ struct resource *res;
+ int i;
+
+ if (!atomic_read(&sysc_defer))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
+ res = &early_bus_ranges[i];
+ if (ddata->module_pa >= res->start &&
+ ddata->module_pa <= res->end) {
+ atomic_set(&sysc_defer, 0);
+
+ return 0;
+ }
+ }
+
+ atomic_dec_if_positive(&sysc_defer);
+
+ return -EPROBE_DEFER;
+}
+
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -624,7 +792,7 @@ static void sysc_check_one_child(struct sysc *ddata,
const char *name;
name = of_get_property(np, "ti,hwmods", NULL);
- if (name)
+ if (name && !of_device_is_compatible(np, "ti,sysc"))
dev_warn(ddata->dev, "really a child ti,hwmods property?");
sysc_check_quirk_stdout(ddata, np);
@@ -738,7 +906,7 @@ static int sysc_check_registers(struct sysc *ddata)
}
/**
- * syc_ioremap - ioremap register space for the interconnect target module
+ * sysc_ioremap - ioremap register space for the interconnect target module
* @ddata: device driver data
*
* Note that the interconnect target module registers can be anywhere
@@ -783,14 +951,22 @@ static int sysc_ioremap(struct sysc *ddata)
*/
static int sysc_map_and_check_registers(struct sysc *ddata)
{
+ struct device_node *np = ddata->dev->of_node;
int error;
error = sysc_parse_and_check_child_range(ddata);
if (error)
return error;
+ error = sysc_defer_non_critical(ddata);
+ if (error)
+ return error;
+
sysc_check_children(ddata);
+ if (!of_get_property(np, "reg", NULL))
+ return 0;
+
error = sysc_parse_registers(ddata);
if (error)
return error;
@@ -861,6 +1037,22 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
+/**
+ * sysc_write_sysconfig - handle sysconfig quirks for register write
+ * @ddata: device driver data
+ * @value: register value
+ */
+static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
+{
+ if (ddata->module_unlock_quirk)
+ ddata->module_unlock_quirk(ddata);
+
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
+
+ if (ddata->module_lock_quirk)
+ ddata->module_lock_quirk(ddata);
+}
+
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK 2
@@ -870,18 +1062,53 @@ static int sysc_enable_module(struct device *dev)
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
+ int error;
ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+ "Optional clocks failed for enable: %i\n",
+ error);
+ return error;
+ }
+ }
+ /*
+ * Some modules like i2c and hdq1w have unusable reset status unless
+ * the module reset quirk is enabled. Skip status check on enable.
+ */
+ if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ }
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
- /* Set CLOCKACTIVITY, we only use it for ick */
+ /*
+ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
if (regbits->clkact_shift >= 0 &&
- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
/* Set SIDLE mode */
@@ -907,7 +1134,7 @@ static int sysc_enable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_midle:
/* Set MIDLE mode */
@@ -918,7 +1145,8 @@ set_midle:
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
- return -EINVAL;
+ error = -EINVAL;
+ goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
@@ -926,20 +1154,26 @@ set_midle:
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_autoidle:
 /* Autoidle bit must be enabled separately if available */
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
}
+ error = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
- return 0;
+ return error;
}
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
@@ -991,13 +1225,15 @@ static int sysc_disable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_sidle:
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
- if (!idlemodes || regbits->sidle_shift < 0)
- return 0;
+ if (!idlemodes || regbits->sidle_shift < 0) {
+ ret = 0;
+ goto save_context;
+ }
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
best_mode = SYSC_IDLE_FORCE;
@@ -1005,7 +1241,8 @@ set_sidle:
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return ret;
+ ret = -EINVAL;
+ goto save_context;
}
}
@@ -1014,9 +1251,15 @@ set_sidle:
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
- return 0;
+ ret = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return ret;
}
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
@@ -1095,10 +1338,10 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
err_allow_idle:
- reset_control_assert(ddata->rsts);
-
sysc_clkdm_allow_idle(ddata);
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -1154,28 +1397,112 @@ err_allow_idle:
return error;
}
+/*
+ * Checks if device context was lost. Assumes the sysconfig register value
+ * after lost context is different from the configured value. Only works for
+ * enabled devices.
+ *
+ * Eventually we may also want to add support for using the context-lost
+ * registers that some SoCs have.
+ */
+static int sysc_check_context(struct sysc *ddata)
+{
+ u32 reg;
+
+ if (!ddata->enabled)
+ return -ENODATA;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ if (reg == ddata->sysconfig)
+ return 0;
+
+ return -EACCES;
+}
+
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+ struct device *dev = ddata->dev;
+ int error;
+
+ if (ddata->enabled) {
+ /* Nothing to do if enabled and context not lost */
+ error = sysc_check_context(ddata);
+ if (!error)
+ return 0;
+
+ /* Disable target module if it is enabled */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+ }
+
+ /* Enable target module */
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "reinit resume failed: %i\n", error);
+
+ /* Some modules like am335x gpmc need reset and restore of sysconfig */
+ if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_warn(dev, "reinit reset failed: %i\n", error);
+
+ sysc_write_sysconfig(ddata, ddata->sysconfig);
+ }
+
+ if (leave_enabled)
+ return error;
+
+ /* Disable target module if leave_enabled was not set */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+ return error;
+}
+
static int __maybe_unused sysc_noirq_suspend(struct device *dev)
{
struct sysc *ddata;
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_suspend(dev);
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = 1;
+
+ return sysc_runtime_suspend(dev);
}
static int __maybe_unused sysc_noirq_resume(struct device *dev)
{
struct sysc *ddata;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_resume(dev);
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+ error = sysc_reinit_module(ddata, ddata->needs_resume);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ } else if (ddata->needs_resume) {
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ }
+
+ ddata->needs_resume = 0;
+
+ return error;
}
static const struct dev_pm_ops sysc_pm_ops = {
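sysc_check_context() above relies on the SYSCONFIG register reading back the value saved at configuration time; a mismatch means the power domain lost context. A bare-bones sketch of that save-and-compare idea, with hypothetical foo_* names:

    #include <linux/io.h>
    #include <linux/types.h>

    struct foo_module {
            void __iomem *base;
            u32 saved_sysconfig;    /* value observed right after configuration */
    };

    static void foo_save_context(struct foo_module *m, unsigned long offset)
    {
            m->saved_sysconfig = readl(m->base + offset);
    }

    static bool foo_context_lost(struct foo_module *m, unsigned long offset)
    {
            /*
             * If the register no longer reads back the saved value, the power
             * domain lost context and the module needs to be reinitialized.
             */
            return readl(m->base + offset) != m->saved_sysconfig;
    }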
@@ -1212,122 +1539,167 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
- SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- 0),
- /* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
- 0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
- 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
- SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
SYSC_QUIRK_SWSUP_SIDLE),
/* Quirks that need to be set based on detected module */
- SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
+ SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
- SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
+ /* Errata i893 handling for dra7 dcan1 and 2 */
+ SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
+ SYSC_QUIRK_GPMC_DEBUG),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
- SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
- SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
- SYSC_MODULE_QUIRK_I2C),
- SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
- SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
- 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
- SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE),
+ SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
+ SYSC_MODULE_QUIRK_RTC_UNLOCK),
+ SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+ SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
+ SYSC_MODULE_QUIRK_PRUSS),
/* Watchdog on am3 and am4 */
SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
- SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
- SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
- SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0),
- SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
+ SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
+ SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
+ SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
- SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
- SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
- SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
+ SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
- SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
- SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
- SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
- SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0),
+ SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
+ SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
+ SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
- SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0),
- SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0),
+ SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
+ SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
- SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
+ SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0),
- SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0),
- SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0),
- SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0),
- SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
+ SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
+ SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
- SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
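Entries in the table above are selected by comparing the module's revision register against the quirk's revision value under its mask. A simplified sketch of that kind of masked lookup (hypothetical foo_* names and flag):

    #include <linux/kernel.h>
    #include <linux/types.h>

    #define FOO_QUIRK_SWSUP_SIDLE   BIT(0)  /* hypothetical flag */

    struct foo_quirk {
            const char *name;
            u32 revision;
            u32 revision_mask;
            u32 flags;
    };

    static const struct foo_quirk foo_quirks[] = {
            { "uart", 0x50411e03, 0xffff00ff, FOO_QUIRK_SWSUP_SIDLE },
    };

    static u32 foo_match_quirks(u32 rev)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(foo_quirks); i++) {
                    const struct foo_quirk *q = &foo_quirks[i];

                    /* Compare only the revision bits the mask cares about. */
                    if ((rev & q->revision_mask) == (q->revision & q->revision_mask))
                            return q->flags;
            }

            return 0;
    }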
@@ -1349,16 +1721,13 @@ static void sysc_init_early_quirks(struct sysc *ddata)
if (q->base != ddata->module_pa)
continue;
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
ddata->name = q->name;
@@ -1378,16 +1747,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
if (q->base && q->base != ddata->module_pa)
continue;
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
if (q->revision == ddata->revision ||
@@ -1399,6 +1765,129 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * DSS needs dispc outputs disabled to reset modules. Returns mask of
+ * enabled DSS interrupts. Eventually we may be able to do this on
+ * dispc init rather than top-level DSS init.
+ */
+static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ bool disable)
+{
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
+ const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
+ int manager_count;
+ bool framedonetv_irq = true;
+ u32 val, irq_mask = 0;
+
+ switch (sysc_soc->soc) {
+ case SOC_2420 ... SOC_3630:
+ manager_count = 2;
+ framedonetv_irq = false;
+ break;
+ case SOC_4430 ... SOC_4470:
+ manager_count = 3;
+ break;
+ case SOC_5430:
+ case SOC_DRA7:
+ manager_count = 4;
+ break;
+ case SOC_AM4:
+ manager_count = 1;
+ framedonetv_irq = false;
+ break;
+ case SOC_UNKNOWN:
+ default:
+ return 0;
+ }
+
+ /* Remap the whole module range to be able to reset dispc outputs */
+ devm_iounmap(ddata->dev, ddata->module_va);
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ ddata->module_size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ /* DISP_CONTROL */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+ if (lcd_en)
+ irq_mask |= BIT(0); /* FRAMEDONE */
+ if (digit_en) {
+ if (framedonetv_irq)
+ irq_mask |= BIT(24); /* FRAMEDONETV */
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+ if (disable && (lcd_en || digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+ if (manager_count <= 2)
+ return irq_mask;
+
+ /* DISPC_CONTROL2 */
+ val = sysc_read(ddata, dispc_offset + 0x238);
+ lcd2_en = val & lcd_en_mask;
+ if (lcd2_en)
+ irq_mask |= BIT(22); /* FRAMEDONE2 */
+ if (disable && lcd2_en)
+ sysc_write(ddata, dispc_offset + 0x238,
+ val & ~lcd_en_mask);
+
+ if (manager_count <= 3)
+ return irq_mask;
+
+ /* DISPC_CONTROL3 */
+ val = sysc_read(ddata, dispc_offset + 0x848);
+ lcd3_en = val & lcd_en_mask;
+ if (lcd3_en)
+ irq_mask |= BIT(30); /* FRAMEDONE3 */
+ if (disable && lcd3_en)
+ sysc_write(ddata, dispc_offset + 0x848,
+ val & ~lcd_en_mask);
+
+ return irq_mask;
+}
+
+/* DSS needs child outputs disabled and SDI registers cleared for reset */
+static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+{
+ const int dispc_offset = 0x1000;
+ int error;
+ u32 irq_mask, val;
+
+ /* Get enabled outputs */
+ irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
+ if (!irq_mask)
+ return;
+
+ /* Clear IRQSTATUS */
+ sysc_write(ddata, dispc_offset + 0x18, irq_mask);
+
+ /* Disable outputs */
+ val = sysc_quirk_dispc(ddata, dispc_offset, true);
+
+ /* Poll IRQSTATUS */
+ error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
+ val, val != irq_mask, 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "%s: timed out %08x != %08x\n",
+ __func__, val, irq_mask);
+
+ if (sysc_soc->soc == SOC_3430) {
+ /* Clear DSS_SDI_CONTROL */
+ sysc_write(ddata, 0x44, 0);
+
+ /* Clear DSS_PLL_CONTROL */
+ sysc_write(ddata, 0x48, 0);
+ }
+
+ /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not already */
+ sysc_write(ddata, 0x40, 0);
+}
+
/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
@@ -1418,7 +1907,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata)
sysc_write(ddata, offset, 1);
}
-/* I2C needs extra enable bit toggling for reset */
+/* I2C needs to be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
int offset;
@@ -1439,14 +1928,64 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
sysc_write(ddata, offset, val);
}
-static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
+static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
sysc_clk_quirk_i2c(ddata, true);
}
-static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
+/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
+static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
- sysc_clk_quirk_i2c(ddata, false);
+ u32 val, kick0_val = 0, kick1_val = 0;
+ unsigned long flags;
+ int error;
+
+ if (!lock) {
+ kick0_val = 0x83e70b13;
+ kick1_val = 0x95a4f1e0;
+ }
+
+ local_irq_save(flags);
+ /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
+ error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "rtc busy timeout\n");
+ /* Now we have ~15 microseconds to read/write various registers */
+ sysc_write(ddata, 0x6c, kick0_val);
+ sysc_write(ddata, 0x70, kick1_val);
+ local_irq_restore(flags);
+}
+
+static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, false);
+}
+
+static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, true);
+}
+
+/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
+static void sysc_module_enable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+
+ sysc_write(ddata, offset, 0);
+}
+
+static void sysc_module_disable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+ u32 val = BIT(0); /* ENABLEFORCE */
+
+ sysc_write(ddata, offset, val);
}
 /* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
@@ -1482,20 +2021,38 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
+/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
+static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ reg |= SYSC_PRUSS_STANDBY_INIT;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
- ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
+
+ return;
+ }
+
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
return;
}
+#endif
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
- ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
- ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
+ ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
return;
}
@@ -1503,6 +2060,21 @@ static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
+ ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
+ ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
+ ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -1510,6 +2082,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -1524,7 +2099,7 @@ static int sysc_clockdomain_init(struct sysc *ddata)
switch (ddata->nr_clocks) {
case 2:
ick = ddata->clocks[SYSC_ICK];
- /* fallthrough */
+ fallthrough;
case 1:
fck = ddata->clocks[SYSC_FCK];
break;
@@ -1565,50 +2140,37 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
- u32 sysc_mask, syss_done;
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- if (ddata->legacy_mode || sysc_offset < 0 ||
+ if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
- syss_done = 0;
- else
- syss_done = ddata->cfg.syss_mask;
-
- if (ddata->clk_disable_quirk)
- ddata->clk_disable_quirk(ddata);
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
- sysc_val = sysc_read_sysconfig(ddata);
- sysc_val |= sysc_mask;
- sysc_write(ddata, sysc_offset, sysc_val);
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+ }
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
- if (ddata->clk_enable_quirk)
- ddata->clk_enable_quirk(ddata);
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
- /* Poll on reset status */
- if (syss_offset >= 0) {
- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
- (rstval & ddata->cfg.syss_mask) ==
- syss_done,
- 100, MAX_MODULE_SOFTRESET_WAIT);
-
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
- !(rstval & sysc_mask),
- 100, MAX_MODULE_SOFTRESET_WAIT);
- }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
@@ -1624,6 +2186,7 @@ static int sysc_reset(struct sysc *ddata)
*/
static int sysc_init_module(struct sysc *ddata)
{
+ bool rstctrl_deasserted = false;
int error = 0;
error = sysc_clockdomain_init(ddata);
@@ -1648,6 +2211,7 @@ static int sysc_init_module(struct sysc *ddata)
error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
+ rstctrl_deasserted = true;
}
ddata->revision = sysc_read_revision(ddata);
@@ -1657,13 +2221,13 @@ static int sysc_init_module(struct sysc *ddata)
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
error = sysc_reset(ddata);
@@ -1673,10 +2237,6 @@ static int sysc_init_module(struct sysc *ddata)
if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
-err_reset:
- if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
err_main_clocks:
if (error)
sysc_disable_main_clocks(ddata);
@@ -1687,6 +2247,10 @@ err_opt_clocks:
sysc_clkdm_allow_idle(ddata);
}
+ if (error && rstctrl_deasserted &&
+ !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -1956,6 +2520,78 @@ static struct dev_pm_domain sysc_child_pm_domain = {
}
};
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct list_head *pos;
+ struct sysc *ddata;
+
+ list_for_each(pos, &sysc_soc->restored_modules) {
+ module = list_entry(pos, struct sysc_module, node);
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: unused
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for am335x
+ * OTG and GPMC target modules even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
/**
* sysc_legacy_idle_quirk - handle children in omap_device compatible way
* @ddata: device driver data
@@ -2313,6 +2949,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
+/*
+ * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
+ */
+static const struct sysc_capabilities sysc_pruss = {
+ .type = TI_SYSC_PRUSS,
+ .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
+};
+
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
@@ -2386,6 +3032,231 @@ static void ti_sysc_idle(struct work_struct *work)
pm_runtime_put_sync(ddata->dev);
}
+/*
+ * SoC model and features detection. Only needed for SoCs that need
+ * special handling for quirks, no need to list others.
+ */
+static const struct soc_device_attribute sysc_soc_match[] = {
+ SOC_FLAG("OMAP242*", SOC_2420),
+ SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("OMAP3[45]*", SOC_3430),
+ SOC_FLAG("OMAP3[67]*", SOC_3630),
+ SOC_FLAG("OMAP443*", SOC_4430),
+ SOC_FLAG("OMAP446*", SOC_4460),
+ SOC_FLAG("OMAP447*", SOC_4470),
+ SOC_FLAG("OMAP54*", SOC_5430),
+ SOC_FLAG("AM433", SOC_AM3),
+ SOC_FLAG("AM43*", SOC_AM4),
+ SOC_FLAG("DRA7*", SOC_DRA7),
+
+ { /* sentinel */ }
+};
+
+/*
+ * List of SoC variants with disabled features. By default we assume all
+ * devices in the device tree are available so no need to list those SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_feat_match[] = {
+ /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
+ SOC_FLAG("AM3505", DIS_SGX),
+ SOC_FLAG("OMAP3525", DIS_SGX),
+ SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
+
+ /* OMAP3630/DM3730 variants with some accelerators disabled */
+ SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
+ SOC_FLAG("DM3725", DIS_SGX),
+ SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
+ SOC_FLAG("OMAP3621", DIS_ISP),
+
+ { /* sentinel */ }
+};
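SOC_FLAG() is defined earlier in ti-sysc.c and is outside this hunk; assuming it merely pairs a machine glob with a flag value, the tables above are matched against the booted SoC with soc_device_match(), as sysc_init_static_data() does below. A hedged sketch of the plausible expansion and its use (the real macro may differ):

/* Hypothetical expansion of SOC_FLAG(); see the driver for the real one */
#define SOC_FLAG(match, flag) { .machine = (match), .data = (void *)(flag), }

	/* Glob match against the booted SoC, then pick up the flag value */
	match = soc_device_match(sysc_soc_match);
	if (match && match->data)
		sysc_soc->soc = (int)match->data;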
+
+static int sysc_add_disabled(unsigned long base)
+{
+ struct sysc_address *disabled_module;
+
+ disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
+ if (!disabled_module)
+ return -ENOMEM;
+
+ disabled_module->base = base;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_add(&disabled_module->node, &sysc_soc->disabled_modules);
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return 0;
+}
+
+/*
+ * One time init to detect the booted SoC, disable unavailable features
+ * and initialize list for optional cpu_pm notifier.
+ *
+ * Note that we initialize static data shared across all ti-sysc instances
+ * so ddata is only used for SoC type. This can be called from module_init
+ * once we no longer need to rely on platform data.
+ */
+static int sysc_init_static_data(struct sysc *ddata)
+{
+ const struct soc_device_attribute *match;
+ struct ti_sysc_platform_data *pdata;
+ unsigned long features = 0;
+ struct device_node *np;
+
+ if (sysc_soc)
+ return 0;
+
+ sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
+ if (!sysc_soc)
+ return -ENOMEM;
+
+ mutex_init(&sysc_soc->list_lock);
+ INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
+ sysc_soc->general_purpose = true;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->soc_type_gp)
+ sysc_soc->general_purpose = pdata->soc_type_gp();
+
+ match = soc_device_match(sysc_soc_match);
+ if (match && match->data)
+ sysc_soc->soc = (int)match->data;
+
+ /*
+ * Check and warn about possible old incomplete dtb. We now want to see
+ * simple-pm-bus instead of simple-bus in the dtb for genpd-using SoCs.
+ */
+ switch (sysc_soc->soc) {
+ case SOC_AM3:
+ case SOC_AM4:
+ case SOC_4430 ... SOC_4470:
+ case SOC_5430:
+ case SOC_DRA7:
+ np = of_find_node_by_path("/ocp");
+ WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
+ "ti-sysc: Incomplete old dtb, please update\n");
+ break;
+ default:
+ break;
+ }
+
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ case SOC_AM3:
+ sysc_add_disabled(0x48310000); /* rng */
+ break;
+ default:
+ break;
+ }
+ }
+
+ match = soc_device_match(sysc_soc_feat_match);
+ if (!match)
+ return 0;
+
+ if (match->data)
+ features = (unsigned long)match->data;
+
+ /*
+ * Add disabled devices to the list based on the module base.
+ * Note that this must be done before we attempt to access the
+ * device and have module revision checks working.
+ */
+ if (features & DIS_ISP)
+ sysc_add_disabled(0x480bd400);
+ if (features & DIS_IVA)
+ sysc_add_disabled(0x5d000000);
+ if (features & DIS_SGX)
+ sysc_add_disabled(0x50000000);
+
+ return 0;
+}
+
+static void sysc_cleanup_static_data(void)
+{
+ struct sysc_module *restored_module;
+ struct sysc_address *disabled_module;
+ struct list_head *pos, *tmp;
+
+ if (!sysc_soc)
+ return;
+
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
+ list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ list_del(pos);
+ kfree(disabled_module);
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+static int sysc_check_disabled_devices(struct sysc *ddata)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos;
+ int error = 0;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each(pos, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ if (ddata->module_pa == disabled_module->base) {
+ dev_dbg(ddata->dev, "module disabled for this SoC\n");
+ error = -ENODEV;
+ break;
+ }
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return error;
+}
+
+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+ int error;
+
+ if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+ ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+ return 0;
+
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+ return error;
+
+ return 0;
+}
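These return values are consumed in sysc_probe() later in this diff: -ENODEV from the disabled-device check aborts the probe, while -ENXIO from the active-timer check marks the module as reserved so its children are not populated. For reference, the probe-side handling (taken from the hunks below):

	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	error = sysc_check_active_timer(ddata);
	if (error == -ENXIO)
		ddata->reserved = true;	/* keep the timer, but skip of_platform_populate() */
	else if (error)
		return error;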
+
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2401,9 +3272,16 @@ static int sysc_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
+ ddata->offsets[SYSC_REVISION] = -ENODEV;
+ ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
+ ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
+ error = sysc_init_static_data(ddata);
+ if (error)
+ return error;
+
error = sysc_init_match(ddata);
if (error)
return error;
@@ -2434,6 +3312,16 @@ static int sysc_probe(struct platform_device *pdev)
sysc_init_early_quirks(ddata);
+ error = sysc_check_disabled_devices(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_active_timer(ddata);
+ if (error == -ENXIO)
+ ddata->reserved = true;
+ else if (error)
+ return error;
+
error = sysc_get_clocks(ddata);
if (error)
return error;
@@ -2447,17 +3335,13 @@ static int sysc_probe(struct platform_device *pdev)
goto unprepare;
pm_runtime_enable(ddata->dev);
- error = pm_runtime_get_sync(ddata->dev);
+ error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
pm_runtime_disable(ddata->dev);
goto unprepare;
}
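pm_runtime_resume_and_get() is roughly the open-coded sequence it replaces: resume the device and, on failure, drop the usage count before returning, so the caller no longer needs an explicit pm_runtime_put_noidle(). A sketch of the equivalence, assuming the helper behaves like the deleted lines:

	/* Before: usage count must be balanced by hand on failure */
	error = pm_runtime_get_sync(dev);
	if (error < 0) {
		pm_runtime_put_noidle(dev);
		return error;
	}

	/* After: the helper drops the count itself when resume fails */
	error = pm_runtime_resume_and_get(dev);
	if (error < 0)
		return error;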
/* Balance use counts as PM runtime should have enabled these all */
- if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
if (!(ddata->cfg.quirks &
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
sysc_disable_main_clocks(ddata);
@@ -2465,14 +3349,21 @@ static int sysc_probe(struct platform_device *pdev)
sysc_clkdm_allow_idle(ddata);
}
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
- error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
- pdata ? pdata->auxdata : NULL,
- ddata->dev);
- if (error)
- goto err;
+
+ if (!ddata->reserved) {
+ error = of_platform_populate(ddata->dev->of_node,
+ sysc_match_table,
+ pdata ? pdata->auxdata : NULL,
+ ddata->dev);
+ if (error)
+ goto err;
+ }
INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
@@ -2485,6 +3376,9 @@ static int sysc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
return 0;
err:
@@ -2501,11 +3395,12 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
- cancel_delayed_work_sync(&ddata->idle_work);
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
- error = pm_runtime_get_sync(ddata->dev);
+ error = pm_runtime_resume_and_get(ddata->dev);
if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
pm_runtime_disable(ddata->dev);
goto unprepare;
}
@@ -2514,7 +3409,9 @@ static int sysc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- reset_control_assert(ddata->rsts);
+
+ if (!reset_control_status(ddata->rsts))
+ reset_control_assert(ddata->rsts);
unprepare:
sysc_unprepare(ddata);
@@ -2538,6 +3435,7 @@ static const struct of_device_id sysc_match[] = {
{ .compatible = "ti,sysc-usb-host-fs",
.data = &sysc_omap4_usb_host_fs, },
{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+ { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
@@ -2564,6 +3462,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
+ sysc_cleanup_static_data();
}
module_exit(sysc_exit);
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 9989ce904a37..38c886dc2ed6 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NBUS driver for TS-4600 based boards
*
* Copyright (c) 2016 - Savoir-faire Linux
* Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
* This driver implements a GPIOs bit-banged bus, called the NBUS by Technologic
* Systems. It is used to communicate with the peripherals in the FPGA on the
* TS-4600 SoM.
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index ff70575b2db6..a58ac0c8e282 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -6,10 +6,61 @@
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/vexpress.h>
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
@@ -18,26 +69,20 @@ struct vexpress_config_bridge {
static DEFINE_MUTEX(vexpress_config_mutex);
-static struct class *vexpress_config_class;
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
-void vexpress_config_set_master(u32 site)
+static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
-u32 vexpress_config_get_master(void)
-{
- return vexpress_config_site_master;
-}
-
-void vexpress_config_lock(void *arg)
+static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
-void vexpress_config_unlock(void *arg)
+static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
@@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node,
}
}
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
@@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
struct regmap *regmap;
struct regmap **res;
- if (WARN_ON(dev->parent->class != vexpress_config_class))
- return ERR_PTR(-ENODEV);
-
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
@@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
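Child devices on the config bus keep using this helper; with the class-based lookup gone, the only requirement is that the parent device carries the bridge pointer in its drvdata, as the syscfg probe below arranges. A minimal consumer sketch (hypothetical child driver, assuming the usual regmap accessors):

static int vexpress_child_probe(struct platform_device *pdev)
{
	struct regmap *map;
	unsigned int val;
	int err;

	/* The parent is the bridge device registered by vexpress-syscfg */
	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Index 0 selects the first arm,vexpress-sysreg,func template */
	err = regmap_read(map, 0, &val);
	if (err)
		return err;

	return regmap_write(map, 0, val);
}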
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context)
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
{
- struct device *dev;
- struct vexpress_config_bridge *bridge;
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
- if (!vexpress_config_class) {
- vexpress_config_class = class_create(THIS_MODULE,
- "vexpress-config");
- if (IS_ERR(vexpress_config_class))
- return (void *)vexpress_config_class;
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
- dev = device_create(vexpress_config_class, parent, 0,
- NULL, "%s.bridge", dev_name(parent));
+ return 0;
+}
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
- if (IS_ERR(dev))
- return dev;
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
- bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- put_device(dev);
- device_unregister(dev);
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
- bridge->ops = ops;
- bridge->context = context;
- dev_set_drvdata(dev, bridge);
+ vexpress_syscfg_regmap_config.max_register = num - 1;
- dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
- dev_name(dev), parent->of_node);
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
- return dev;
-}
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
-static int vexpress_config_node_match(struct device *dev, const void *data)
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
- const struct device_node *node = data;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
- dev_dbg(dev, "Parent node %p, looking for %p\n",
- dev->parent->of_node, node);
+ regmap_exit(regmap);
- return dev->parent->of_node == node;
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+ list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
}
-static int vexpress_config_populate(struct device_node *node)
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
{
- struct device_node *bridge;
- struct device *parent;
- int ret;
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
- bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -EINVAL;
+ return -ENOMEM;
- parent = class_find_device(vexpress_config_class, NULL, bridge,
- vexpress_config_node_match);
- of_node_put(bridge);
- if (WARN_ON(!parent))
- return -ENODEV;
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
- ret = of_platform_populate(node, NULL, NULL, parent);
+ dev_set_drvdata(&pdev->dev, bridge);
- put_device(parent);
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
- return ret;
-}
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-static int __init vexpress_config_init(void)
-{
- int err = 0;
- struct device_node *node;
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
- /* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
- err = vexpress_config_populate(node);
- if (err) {
- of_node_put(node);
- break;
- }
+ struct device_node *bridge_np;
+
+ bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (bridge_np != pdev->dev.parent->of_node)
+ continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
}
- return err;
+ return 0;
}
-postcore_initcall(vexpress_config_init);
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");