Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c | 34
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc | 18
-rw-r--r--  drivers/cpufreq/Makefile | 3
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 173
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h | 33
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c | 102
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 150
-rw-r--r--  drivers/firmware/Kconfig | 2
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/arm_ffa/bus.c | 14
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 532
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 69
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 10
-rw-r--r--  drivers/firmware/samsung/Kconfig | 14
-rw-r--r--  drivers/firmware/samsung/Makefile | 4
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.c | 224
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.h | 29
-rw-r--r--  drivers/firmware/samsung/exynos-acpm.c | 769
-rw-r--r--  drivers/firmware/samsung/exynos-acpm.h | 23
-rw-r--r--  drivers/firmware/smccc/soc_id.c | 80
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 6
-rw-r--r--  drivers/gpu/drm/clients/drm_log.c | 4
-rw-r--r--  drivers/gpu/drm/drm_draw.c | 2
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 48
-rw-r--r--  drivers/memory/mtk-smi.c | 33
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/toshiba/Makefile | 2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 2556
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.h | 475
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net_ethtool.c | 174
-rw-r--r--  drivers/ntb/msi.c | 22
-rw-r--r--  drivers/nvme/host/apple.c | 2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 14
-rw-r--r--  drivers/pci/msi/api.c | 6
-rw-r--r--  drivers/pci/msi/msi.c | 167
-rw-r--r--  drivers/pci/pci.h | 9
-rw-r--r--  drivers/pci/tph.c | 44
-rw-r--r--  drivers/reset/Kconfig | 7
-rw-r--r--  drivers/reset/Makefile | 1
-rw-r--r--  drivers/reset/reset-imx-scu.c | 101
-rw-r--r--  drivers/sh/clk/cpg.c | 25
-rw-r--r--  drivers/soc/apple/rtkit-internal.h | 1
-rw-r--r--  drivers/soc/apple/rtkit.c | 112
-rw-r--r--  drivers/soc/atmel/soc.c | 5
-rw-r--r--  drivers/soc/atmel/soc.h | 3
-rw-r--r--  drivers/soc/mediatek/mt8167-mmsys.h | 31
-rw-r--r--  drivers/soc/mediatek/mt8173-mmsys.h | 99
-rw-r--r--  drivers/soc/mediatek/mt8183-mmsys.h | 50
-rw-r--r--  drivers/soc/mediatek/mt8186-mmsys.h | 88
-rw-r--r--  drivers/soc/mediatek/mt8188-mmsys.h | 266
-rw-r--r--  drivers/soc/mediatek/mt8192-mmsys.h | 71
-rw-r--r--  drivers/soc/mediatek/mt8195-mmsys.h | 632
-rw-r--r--  drivers/soc/mediatek/mt8365-mmsys.h | 84
-rw-r--r--  drivers/soc/mediatek/mtk-mmsys.h | 14
-rw-r--r--  drivers/soc/mediatek/mtk-mutex.c | 6
-rw-r--r--  drivers/soc/mediatek/mtk-socinfo.c | 22
-rw-r--r--  drivers/soc/qcom/ice.c | 51
-rw-r--r--  drivers/soc/qcom/pdr_internal.h | 1
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 3
-rw-r--r--  drivers/soc/qcom/qcom_pd_mapper.c | 12
-rw-r--r--  drivers/soc/qcom/qcom_pdr_msg.c | 3
-rw-r--r--  drivers/soc/renesas/Kconfig | 18
-rw-r--r--  drivers/soc/renesas/Makefile | 4
-rw-r--r--  drivers/soc/renesas/r9a08g045-sysc.c | 23
-rw-r--r--  drivers/soc/renesas/r9a09g047-sys.c | 67
-rw-r--r--  drivers/soc/renesas/r9a09g057-sys.c | 67
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 33
-rw-r--r--  drivers/soc/renesas/rz-sysc.c | 137
-rw-r--r--  drivers/soc/renesas/rz-sysc.h | 46
-rw-r--r--  drivers/soc/samsung/exynos-asv.c | 1
-rw-r--r--  drivers/soc/samsung/exynos-chipid.c | 5
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/exynos-usi.c | 108
-rw-r--r--  drivers/soc/samsung/exynos3250-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/exynos5250-pmu.c | 1
-rw-r--r--  drivers/soc/samsung/exynos5420-pmu.c | 1
-rw-r--r--  drivers/soc/tegra/pmc.c | 3
-rw-r--r--  drivers/soc/ti/k3-socinfo.c | 13
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c | 10
-rw-r--r--  drivers/tty/Kconfig | 19
-rw-r--r--  drivers/tty/serial/serial_core.c | 6
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 77
84 files changed, 3242 insertions, 4952 deletions
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 85d781a32df4..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index eb678fa5260a..551e65d35a1d 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CPU_FREQ_CBE
- tristate "CBE frequency scaling"
- depends on CBE_RAS && PPC_CELL
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CPU_FREQ_CBE_PMI
- bool "CBE frequency scaling using PMI interface"
- depends on CPU_FREQ_CBE
- default n
- help
- Select this, if you want to use the PMI interface to switch
- frequencies. Using PMI, the processor will not only be able to run at
- lower speed, but also at lower core voltage.
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 890fff99f37d..22ab45209f9b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -91,9 +91,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
-obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
-ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
-obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
deleted file mode 100644
index 98595b3ea13f..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {0, 1, 0},
- {0, 2, 0},
- {0, 3, 0},
- {0, 4, 0},
- {0, 5, 0},
- {0, 6, 0},
- {0, 8, 0},
- {0, 10, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pos;
- const u32 *max_freqp;
- u32 max_freq;
- int cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- of_node_put(cpu);
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- cpufreq_for_each_entry(pos, cbe_freqs) {
- pos->frequency = max_freq / pos->driver_data;
- pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- policy->freq_table = cbe_freqs;
- cbe_cpufreq_pmi_policy_init(policy);
- return 0;
-}
-
-static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cbe_cpufreq_pmi_policy_exit(policy);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int cbe_pmode_new)
-{
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].driver_data);
-
- return set_pmode(policy->cpu, cbe_pmode_new);
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destoy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- int ret;
-
- if (!machine_is(cell))
- return -ENODEV;
-
- cbe_cpufreq_pmi_init();
-
- ret = cpufreq_register_driver(&cbe_cpufreq_driver);
- if (ret)
- cbe_cpufreq_pmi_exit();
-
- return ret;
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
- cbe_cpufreq_pmi_exit();
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
deleted file mode 100644
index 00cd8633b0d9..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ppc_cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
-extern bool cbe_cpufreq_has_pmi;
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_init(void);
-void cbe_cpufreq_pmi_exit(void);
-#else
-#define cbe_cpufreq_has_pmi (0)
-static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_init(void) {}
-static inline void cbe_cpufreq_pmi_exit(void) {}
-#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
deleted file mode 100644
index 04830cd95333..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
deleted file mode 100644
index 6f0c32592416..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "ppc_cbe_cpufreq.h"
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- struct cpufreq_policy *policy;
- struct freq_qos_request *req;
- u8 node, slow_mode;
- int cpu, ret;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- cpu = cbe_node_to_cpu(node);
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- pr_warn("cpufreq policy not found cpu%d\n", cpu);
- return;
- }
-
- req = policy->driver_data;
-
- ret = freq_qos_update_request(req,
- policy->freq_table[slow_mode].frequency);
- if (ret < 0)
- pr_warn("Failed to update freq constraint: %d\n", ret);
- else
- pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
-
- cpufreq_cpu_put(policy);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req;
- int ret;
-
- if (!cbe_cpufreq_has_pmi)
- return;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return;
-
- ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
- policy->freq_table[0].frequency);
- if (ret < 0) {
- pr_err("Failed to add freq constraint (%d)\n", ret);
- kfree(req);
- return;
- }
-
- policy->driver_data = req;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req = policy->driver_data;
-
- if (cbe_cpufreq_has_pmi) {
- freq_qos_remove_request(req);
- kfree(req);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
-
-void cbe_cpufreq_pmi_init(void)
-{
- if (!pmi_register_handler(&cbe_pmi_handler))
- cbe_cpufreq_has_pmi = true;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
-
-void cbe_cpufreq_pmi_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- cbe_cpufreq_has_pmi = false;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ebdd972e6f19..aadc395ee168 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -225,6 +225,7 @@ config TH1520_AON_PROTOCOL
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
+ default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@@ -277,6 +278,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
+source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5db9c042430c..4ddec2820c96 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -34,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
+obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index dfda5ffc14db..50bfe56c755e 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,7 +15,7 @@
#include "common.h"
-#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
+#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+ return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
-static void ffa_devices_unregister(void)
+void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
+EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@@ -192,7 +193,6 @@ ffa_device_register(const struct ffa_partition_info *part_info,
const struct ffa_ops *ops)
{
int id, ret;
- uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
@@ -212,14 +212,14 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
ffa_dev->vm_id = part_info->id;
ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- import_uuid(&uuid, (u8 *)part_info->uuid);
- uuid_copy(&ffa_dev->uuid, &uuid);
+ uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2c2ec3c35f15..19295282de24 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -44,7 +44,7 @@
#include "common.h"
-#define FFA_DRIVER_VERSION FFA_VERSION_1_1
+#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@@ -114,7 +114,6 @@ struct ffa_drv_info {
};
static struct ffa_drv_info *drv_info;
-static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -276,9 +283,21 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
- for (idx = 0; idx < count; idx++)
- memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
- buf_sz);
+ for (idx = 0; idx < count; idx++) {
+ struct ffa_partition_info_le {
+ __le16 id;
+ __le16 exec_ctxt;
+ __le32 properties;
+ uuid_t uuid;
+ } *rx_buf = drv_info->rx_buffer + idx * sz;
+ struct ffa_partition_info *buf = buffer + idx;
+
+ buf->id = le16_to_cpu(rx_buf->id);
+ buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
+ buf->properties = le32_to_cpu(rx_buf->properties);
+ if (buf_sz > 8)
+ import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
+ }
ffa_rx_release();
@@ -295,14 +314,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+#define PART_INFO_ID_MASK GENMASK(15, 0)
+#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
+#define PART_INFO_PROPS_MASK GENMASK(63, 32)
+#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
+#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
+#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
+ __le64 *regs;
+ int idx;
+
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@@ -326,8 +355,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
- memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
- (cur_idx - start_idx + 1) * buf_sz);
+ regs = (void *)&partition_info.a3;
+ for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
+ union {
+ uuid_t uuid;
+ u64 regs[2];
+ } uuid_regs = {
+ .regs = {
+ le64_to_cpu(*(regs + 1)),
+ le64_to_cpu(*(regs + 2)),
+ }
+ };
+ u64 val = *(u64 *)regs;
+
+ buf->id = PART_INFO_ID(val);
+ buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
+ buf->properties = PART_INFO_PROPERTIES(val);
+ uuid_copy(&buf->uuid, &uuid_regs.uuid);
+ regs += 3;
+ }
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
@@ -445,9 +491,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
-static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
- u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@@ -463,6 +509,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
+ uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@@ -760,6 +807,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
+enum notify_type {
+ SECURE_PARTITION,
+ NON_SECURE_VM,
+ SPM_FRAMEWORK,
+ NS_HYP_FRAMEWORK,
+};
+
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@@ -783,10 +837,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
-#define SECURE_PARTITION_BITMAP BIT(0)
-#define NON_SECURE_VM_BITMAP BIT(1)
-#define SPM_FRAMEWORK_BITMAP BIT(2)
-#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
+#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
+#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
+#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
+#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
+#define FFA_BITMAP_SECURE_ENABLE_MASK \
+ (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_NS_ENABLE_MASK \
+ (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_ALL_ENABLE_MASK \
+ (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
+
+#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+
+#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
+#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
+#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@@ -852,9 +918,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
- notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
- notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
- notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+ if (flags & SECURE_PARTITION_BITMAP_ENABLE)
+ notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+ if (flags & NON_SECURE_VM_BITMAP_ENABLE)
+ notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+ if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
+ if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
+ ret.a7);
return 0;
}
@@ -863,27 +935,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
+ struct ffa_device *dev;
+ struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
+ struct list_head *phead;
void *cb_data;
- partition = xa_load(&drv_info->partition_info, part_id);
- if (!partition) {
+ phead = xa_load(&drv_info->partition_info, part_id);
+ if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
- read_lock(&partition->rw_lock);
- callback = partition->callback;
- cb_data = partition->cb_data;
- read_unlock(&partition->rw_lock);
+ list_for_each_entry_safe(partition, tmp, phead, node) {
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+ read_unlock(&partition->rw_lock);
- if (callback)
- callback(vcpu, is_per_vcpu, cb_data);
+ if (callback)
+ callback(vcpu, is_per_vcpu, cb_data);
+ }
}
static void ffa_notification_info_get(void)
@@ -899,7 +976,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
- if (ret.a2 != FFA_RET_NO_DATA)
+ if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@@ -935,7 +1012,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
- for (idx = 0; idx < ids_count[list]; idx++) {
+ for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@@ -1015,17 +1092,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
- return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+ return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
-static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
- uuid, data);
+ &dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@@ -1051,35 +1128,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
-#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
-
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
-enum notify_type {
- NON_SECURE_VM,
- SECURE_PARTITION,
- FRAMEWORK,
-};
-
struct notifier_cb_info {
struct hlist_node hnode;
+ struct ffa_device *dev;
+ ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
- enum notify_type type;
};
-static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
- void *cb_data, bool is_registration)
+static int
+ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
+ void *cb_data, bool is_registration)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
+ struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
- partition = xa_load(&drv_info->partition_info, part_id);
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (!phead) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(partition, tmp, phead, node)
+ if (partition->dev == dev)
+ break;
+
if (!partition) {
- pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@@ -1101,12 +1182,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
- return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+ return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
- return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+ return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@@ -1119,27 +1200,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
-/* Should be called while the notify_lock is taken */
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+ if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+ return SECURE_PARTITION;
+ else
+ return NON_SECURE_VM;
+}
+
+/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
-notifier_hash_node_get(u16 notify_id, enum notify_type type)
+notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
- if (type == node->type)
+ if (node->fwk_cb && vmid == node->dev->vm_id)
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
+{
+ struct notifier_cb_info *node;
+
+ if (uuid_is_null(uuid))
+ return notifier_hnode_get_by_vmid(notify_id, vmid);
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->fwk_cb && vmid == node->dev->vm_id &&
+ uuid_equal(&node->dev->uuid, uuid))
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
+{
+ struct notifier_cb_info *node;
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
return node;
return NULL;
}
static int
-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
- void *cb_data, bool is_registration)
+update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
+ void *cb_data, bool is_registration, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
+ enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found;
- cb_info = notifier_hash_node_get(notify_id, type);
+ if (is_framework)
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
+ &dev->uuid);
+ else
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
+
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
@@ -1150,9 +1273,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
if (!cb_info)
return -ENOMEM;
- cb_info->type = type;
- cb_info->cb = cb;
+ cb_info->dev = dev;
cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
@@ -1162,18 +1288,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
return 0;
}
-static enum notify_type ffa_notify_type_get(u16 vm_id)
-{
- if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
- return SECURE_PARTITION;
- else
- return NON_SECURE_VM;
-}
-
-static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
+ bool is_framework)
{
int rc;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1183,26 +1301,38 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
mutex_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+ rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
+ is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
- rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
-static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
- ffa_notifier_cb cb, void *cb_data, int notify_id)
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, false);
+}
+
+static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, true);
+}
+
+static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ void *cb, void *cb_data,
+ int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1212,26 +1342,44 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
mutex_lock(&drv_info->notify_lock);
- if (is_per_vcpu)
- flags = PER_VCPU_NOTIFICATION_FLAG;
+ if (!is_framework) {
+ if (is_per_vcpu)
+ flags = PER_VCPU_NOTIFICATION_FLAG;
- rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
+ rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+ if (rc) {
+ mutex_unlock(&drv_info->notify_lock);
+ return rc;
+ }
}
- rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+ rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
+ is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
- ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
+ false);
+}
+
+static int
+ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
+ void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
+}
+
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
@@ -1258,7 +1406,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
continue;
mutex_lock(&drv_info->notify_lock);
- cb_info = notifier_hash_node_get(notify_id, type);
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
@@ -1266,21 +1414,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_get_and_handle(void *unused)
+static void handle_fwk_notif_callbacks(u32 bitmap)
+{
+ void *buf;
+ uuid_t uuid;
+ int notify_id = 0, target;
+ struct ffa_indirect_msg_hdr *msg;
+ struct notifier_cb_info *cb_info = NULL;
+
+ /* Only one framework notification defined and supported for now */
+ if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
+ return;
+
+ mutex_lock(&drv_info->rx_lock);
+
+ msg = drv_info->rx_buffer;
+ buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&drv_info->rx_lock);
+ return;
+ }
+
+ target = SENDER_ID(msg->send_recv_id);
+ if (msg->offset >= sizeof(*msg))
+ uuid_copy(&uuid, &msg->uuid);
+ else
+ uuid_copy(&uuid, &uuid_null);
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ ffa_rx_release();
+
+ mutex_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
+ mutex_unlock(&drv_info->notify_lock);
+
+ if (cb_info && cb_info->fwk_cb)
+ cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
+ kfree(buf);
+}
+
+static void notif_get_and_handle(void *cb_data)
{
int rc;
- struct ffa_notify_bitmaps bitmaps;
+ u32 flags;
+ struct ffa_drv_info *info = cb_data;
+ struct ffa_notify_bitmaps bitmaps = { 0 };
+
+ if (info->vm_id == 0) /* Non secure physical instance */
+ flags = FFA_BITMAP_SECURE_ENABLE_MASK;
+ else
+ flags = FFA_BITMAP_ALL_ENABLE_MASK;
- rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
- SPM_FRAMEWORK_BITMAP, &bitmaps);
+ rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
+ handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
+ handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
- handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
@@ -1329,6 +1524,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
+ .fwk_notify_request = ffa_fwk_notify_request,
+ .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@@ -1384,11 +1581,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
+static int ffa_xa_add_partition_info(struct ffa_device *dev)
+{
+ struct ffa_dev_part_info *info;
+ struct list_head *head, *phead;
+ int ret = -ENOMEM;
+
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (phead) {
+ head = phead;
+ list_for_each_entry(info, head, node) {
+ if (info->dev == dev) {
+ pr_err("%s: duplicate dev %p part ID 0x%x\n",
+ __func__, dev, dev->vm_id);
+ return -EEXIST;
+ }
+ }
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ret;
+
+ rwlock_init(&info->rw_lock);
+ info->dev = dev;
+
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead)
+ goto free_out;
+
+ INIT_LIST_HEAD(phead);
+
+ ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
+ __func__, dev->vm_id, ret);
+ goto free_out;
+ }
+ }
+ list_add(&info->node, phead);
+ return 0;
+
+free_out:
+ kfree(phead);
+ kfree(info);
+ return ret;
+}
+
+static int ffa_setup_host_partition(int vm_id)
+{
+ struct ffa_partition_info buf = { 0 };
+ struct ffa_device *ffa_dev;
+ int ret;
+
+ buf.id = vm_id;
+ ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register host partition ID 0x%x\n",
+ __func__, vm_id);
+ return -EINVAL;
+ }
+
+ ret = ffa_xa_add_partition_info(ffa_dev);
+ if (ret)
+ return ret;
+
+ if (ffa_notifications_disabled())
+ return 0;
+
+ ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
+ drv_info, true);
+ if (ret)
+ pr_info("Failed to register driver sched callback %d\n", ret);
+
+ return ret;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+ struct list_head *phead;
+ unsigned long idx;
+
+ /* Clean up/free all registered devices */
+ ffa_devices_unregister();
+
+ xa_for_each(&drv_info->partition_info, idx, phead) {
+ struct ffa_dev_part_info *info, *tmp;
+
+ xa_erase(&drv_info->partition_info, idx);
+ list_for_each_entry_safe(info, tmp, phead, node) {
+ list_del(&info->node);
+ kfree(info);
+ }
+ kfree(phead);
+ }
+
+ xa_destroy(&drv_info->partition_info);
+}
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
struct ffa_device *ffa_dev;
- struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@@ -1422,59 +1718,30 @@ static int ffa_setup_partitions(void)
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
+ if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, tpbuf->id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
- __func__, tpbuf->id, ret);
- ffa_device_unregister(ffa_dev);
- kfree(info);
- }
}
kfree(pbuf);
- /* Allocate for the host */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- /* Already registered devices are freed on bus_exit */
- ffa_partitions_cleanup();
- return -ENOMEM;
- }
+ /*
+ * Check if the host is already added as part of partition info
+ * No multiple UUID possible for the host, so just checking if
+ * there is an entry will suffice
+ */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
- __func__, drv_info->vm_id, ret);
- kfree(info);
- /* Already registered devices are freed on bus_exit */
+ /* Allocate for the host */
+ ret = ffa_setup_host_partition(drv_info->vm_id);
+ if (ret)
ffa_partitions_cleanup();
- }
return ret;
}
-static void ffa_partitions_cleanup(void)
-{
- struct ffa_dev_part_info *info;
- unsigned long idx;
-
- xa_for_each(&drv_info->partition_info, idx, info) {
- xa_erase(&drv_info->partition_info, idx);
- kfree(info);
- }
-
- xa_destroy(&drv_info->partition_info);
-}
-
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@@ -1777,19 +2044,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
- if (ret) {
- pr_err("failed to setup partitions\n");
- goto cleanup_notifs;
- }
-
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (ret)
- pr_info("Failed to register driver sched callback %d\n", ret);
-
- return 0;
+ if (!ret)
+ return ret;
-cleanup_notifs:
+ pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index a3386bf36de5..7af01664ce7e 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -17,6 +17,8 @@
#include "common.h"
+#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
+
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT be already existent for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+ * Find the matching protocol rdev list and then search of any
+ * existent equally named device...fails if any duplicate found.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -283,11 +278,59 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
+static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "%s\n", scmi_dev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *scmi_device_attributes_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scmi_device_attributes);
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
+ .uevent = scmi_device_uevent,
+ .dev_groups = scmi_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 60050da54bf2..1c75a4c9c371 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
-#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
}
/**
diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig
new file mode 100644
index 000000000000..16d81aeb1d41
--- /dev/null
+++ b/drivers/firmware/samsung/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EXYNOS_ACPM_PROTOCOL
+ tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Alive Clock and Power Manager (ACPM) Message Protocol is defined for
+ the purpose of communication between the ACPM firmware and masters
+ (AP, AOC, ...). ACPM firmware operates on the Active Power Management
+ (APM) module that handles overall power activities.
+
+ This protocol driver provides interface for all the client drivers
+ making use of the features offered by the APM.
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
new file mode 100644
index 000000000000..7b4c9f6f34f5
--- /dev/null
+++ b/drivers/firmware/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
new file mode 100644
index 000000000000..85e90d236da2
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
+#define ACPM_PMIC_TYPE GENMASK(11, 8)
+#define ACPM_PMIC_REG GENMASK(7, 0)
+
+#define ACPM_PMIC_RETURN GENMASK(31, 24)
+#define ACPM_PMIC_MASK GENMASK(23, 16)
+#define ACPM_PMIC_VALUE GENMASK(15, 8)
+#define ACPM_PMIC_FUNC GENMASK(7, 0)
+
+#define ACPM_PMIC_BULK_SHIFT 8
+#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
+#define ACPM_PMIC_BULK_MAX_COUNT 8
+
+enum exynos_acpm_pmic_func {
+ ACPM_PMIC_READ,
+ ACPM_PMIC_WRITE,
+ ACPM_PMIC_UPDATE,
+ ACPM_PMIC_BULK_READ,
+ ACPM_PMIC_BULK_WRITE,
+};
+
+static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
+{
+ return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
+}
+
+static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
+{
+ return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
+}
+
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+ unsigned int acpm_chan_id)
+{
+ xfer->txd = cmd;
+ xfer->rxd = cmd;
+ xfer->txlen = sizeof(cmd);
+ xfer->rxlen = sizeof(cmd);
+ xfer->acpm_chan_id = acpm_chan_id;
+}
+
+static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_read_cmd(cmd, type, reg, chan);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+}
+
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int i, ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
+ else
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
+ }
+
+ return 0;
+}
+
+static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ int i;
+
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
+ else
+ cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
+ }
+}
+
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value) |
+ FIELD_PREP(ACPM_PMIC_MASK, mask);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
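
The bulk helpers above pack one 8-bit register value per byte lane, with values 0-3 going into cmd[2] and values 4-7 into cmd[3]. A minimal standalone sketch of that packing (plain C, hypothetical register values, and assuming ACPM_PMIC_BULK_SHIFT is 8) behaves like this:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's helpers; assumes ACPM_PMIC_BULK_SHIFT == 8. */
#define BULK_SHIFT	8
#define BULK_MASK	0xffu

static uint32_t set_bulk(uint32_t data, unsigned int i)
{
	return (data & BULK_MASK) << (BULK_SHIFT * i);
}

static uint32_t get_bulk(uint32_t data, unsigned int i)
{
	return (data >> (BULK_SHIFT * i)) & BULK_MASK;
}

int main(void)
{
	const uint8_t buf[6] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x60 };
	uint32_t cmd2 = 0, cmd3 = 0;
	unsigned int i;

	/* Pack: buf[0..3] land in cmd[2], buf[4..7] in cmd[3]. */
	for (i = 0; i < 6; i++) {
		if (i < 4)
			cmd2 |= set_bulk(buf[i], i);
		else
			cmd3 |= set_bulk(buf[i], i - 4);
	}

	/* Prints cmd[2]=0x40302010 cmd[3]=0x00006050. */
	printf("cmd[2]=0x%08x cmd[3]=0x%08x\n", cmd2, cmd3);

	/* Unpacking the third value again yields 0x30. */
	printf("buf[2]=0x%02x\n", get_bulk(cmd2, 2));
	return 0;
}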
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h
new file mode 100644
index 000000000000..078421888a14
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_PMIC_H__
+#define __EXYNOS_ACPM_PMIC_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+#endif /* __EXYNOS_ACPM_PMIC_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
new file mode 100644
index 000000000000..a85b2dbdd9f0
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
+
+/* The polling loop sleeps in 20 us steps; 5000 * 20 us = 100 ms. */
+#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_TX_TIMEOUT_US 500000
+
+#define ACPM_GS101_INITDATA_BASE 0xa000
+
+/**
+ * struct acpm_shmem - shared memory configuration information.
+ * @reserved: unused fields.
+ * @chans: offset to array of struct acpm_chan_shmem.
+ * @reserved1: unused fields.
+ * @num_chans: number of channels.
+ */
+struct acpm_shmem {
+ u32 reserved[2];
+ u32 chans;
+ u32 reserved1[3];
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_chan_shmem - descriptor of a shared memory channel.
+ *
+ * @id: channel ID.
+ * @reserved: unused fields.
+ * @rx_rear: rear pointer of APM RX queue (TX for AP).
+ * @rx_front: front pointer of APM RX queue (TX for AP).
+ * @rx_base: base address of APM RX queue (TX for AP).
+ * @reserved1: unused fields.
+ * @tx_rear: rear pointer of APM TX queue (RX for AP).
+ * @tx_front: front pointer of APM TX queue (RX for AP).
+ * @tx_base: base address of APM TX queue (RX for AP).
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @reserved2: unused fields.
+ * @poll_completion: true when the channel works on polling.
+ */
+struct acpm_chan_shmem {
+ u32 id;
+ u32 reserved[3];
+ u32 rx_rear;
+ u32 rx_front;
+ u32 rx_base;
+ u32 reserved1[3];
+ u32 tx_rear;
+ u32 tx_front;
+ u32 tx_base;
+ u32 qlen;
+ u32 mlen;
+ u32 reserved2[2];
+ u32 poll_completion;
+};
+
+/**
+ * struct acpm_queue - exynos acpm queue.
+ *
+ * @rear: rear address of the queue.
+ * @front: front address of the queue.
+ * @base: base address of the queue.
+ */
+struct acpm_queue {
+ void __iomem *rear;
+ void __iomem *front;
+ void __iomem *base;
+};
+
+/**
+ * struct acpm_rx_data - RX queue data.
+ *
+ * @cmd: pointer to where the data shall be saved.
+ * @n_cmd: number of 32-bit commands.
+ * @response: true if the client expects the RX data.
+ */
+struct acpm_rx_data {
+ u32 *cmd;
+ size_t n_cmd;
+ bool response;
+};
+
+#define ACPM_SEQNUM_MAX 64
+
+/**
+ * struct acpm_chan - driver internal representation of a channel.
+ * @cl: mailbox client.
+ * @chan: mailbox channel.
+ * @acpm: pointer to driver private data.
+ * @tx: TX queue. The enqueue is done by the host.
+ * - front index is written by the host.
+ * - rear index is written by the firmware.
+ *
+ * @rx: RX queue. The enqueue is done by the firmware.
+ * - front index is written by the firmware.
+ * - rear index is written by the host.
+ * @tx_lock: protects TX queue.
+ * @rx_lock: protects RX queue.
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @seqnum: sequence number of the last message enqueued on TX queue.
+ * @id: channel ID.
+ * @poll_completion: indicates if the transfer needs to be polled for
+ * completion or interrupt mode is used.
+ * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
+ * @rx_data: internal buffer used to drain the RX queue.
+ */
+struct acpm_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct acpm_info *acpm;
+ struct acpm_queue tx;
+ struct acpm_queue rx;
+ struct mutex tx_lock;
+ struct mutex rx_lock;
+
+ unsigned int qlen;
+ unsigned int mlen;
+ u8 seqnum;
+ u8 id;
+ bool poll_completion;
+
+ DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
+ struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
+};
+
+/**
+ * struct acpm_info - driver's private data.
+ * @shmem: pointer to the SRAM configuration data.
+ * @sram_base: base address of SRAM.
+ * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
+ * @dev: pointer to the exynos-acpm device.
+ * @handle: instance of acpm_handle to send to clients.
+ * @num_chans: number of channels available for this controller.
+ */
+struct acpm_info {
+ struct acpm_shmem __iomem *shmem;
+ void __iomem *sram_base;
+ struct acpm_chan *chans;
+ struct device *dev;
+ struct acpm_handle handle;
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_match_data - of_device_id data.
+ * @initdata_base: offset in SRAM where the channels configuration resides.
+ */
+struct acpm_match_data {
+ loff_t initdata_base;
+};
+
+#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
+#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
+
+/**
+ * acpm_get_rx() - get response from RX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
+{
+ u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
+ const void __iomem *base, *addr;
+ struct acpm_rx_data *rx_data;
+ u32 i, val, mlen;
+ bool rx_set = false;
+
+ guard(mutex)(&achan->rx_lock);
+
+ rx_front = readl(achan->rx.front);
+ i = readl(achan->rx.rear);
+
+ /* Bail out if RX is empty. */
+ if (i == rx_front)
+ return 0;
+
+ base = achan->rx.base;
+ mlen = achan->mlen;
+
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ /* Drain RX queue. */
+ do {
+ /* Read RX seqnum. */
+ addr = base + mlen * i;
+ val = readl(addr);
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
+ if (!rx_seqnum)
+ return -EIO;
+ /*
+ * Message seqnum starts at 1, whereas the driver indexes the
+ * first message at 0.
+ */
+ seqnum = rx_seqnum - 1;
+ rx_data = &achan->rx_data[seqnum];
+
+ if (rx_data->response) {
+ if (rx_seqnum == tx_seqnum) {
+ __ioread32_copy(xfer->rxd, addr,
+ xfer->rxlen / 4);
+ rx_set = true;
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ } else {
+ /*
+ * The RX data corresponds to another request.
+ * Save the data to drain the queue, but don't
+ * clear the bitmap yet. It will be cleared once
+ * the response is copied back to the request.
+ */
+ __ioread32_copy(rx_data->cmd, addr,
+ xfer->rxlen / 4);
+ }
+ } else {
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ }
+
+ i = (i + 1) % achan->qlen;
+ } while (i != rx_front);
+
+ /* We saved all responses, mark RX empty. */
+ writel(rx_front, achan->rx.rear);
+
+ /*
+ * If the response was not in this iteration of the queue, check if the
+ * RX data was previously saved.
+ */
+ rx_data = &achan->rx_data[tx_seqnum - 1];
+ if (!rx_set && rx_data->response) {
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
+ rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_dequeue_by_polling() - RX dequeue by polling.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_dequeue_by_polling(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct device *dev = achan->acpm->dev;
+ unsigned int cnt_20us = 0;
+ u32 seqnum;
+ int ret;
+
+ seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ do {
+ ret = acpm_get_rx(achan, xfer);
+ if (ret)
+ return ret;
+
+ if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
+ return 0;
+
+ /* Determined experimentally. */
+ usleep_range(20, 30);
+ cnt_20us++;
+ } while (cnt_20us < ACPM_POLL_TIMEOUT);
+
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+
+ return -ETIME;
+}
+
+/**
+ * acpm_wait_for_queue_slots() - wait for queue slots.
+ *
+ * @achan: ACPM channel info.
+ * @next_tx_front: next front index of the TX queue.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
+{
+ u32 val, ret;
+
+ /*
+ * Wait for RX front to keep up with TX front. Make sure there's at
+ * least one element between them.
+ */
+ ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret) {
+ dev_err(achan->acpm->dev, "RX front cannot keep up with TX front.\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret)
+ dev_err(achan->acpm->dev, "TX queue is full.\n");
+
+ return ret;
+}
+
+/**
+ * acpm_prepare_xfer() - prepare a transfer before writing the message to the
+ * TX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being prepared.
+ */
+static void acpm_prepare_xfer(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct acpm_rx_data *rx_data;
+ u32 *txd = (u32 *)xfer->txd;
+
+ /* Prevent achan->seqnum from being re-used */
+ do {
+ if (++achan->seqnum == ACPM_SEQNUM_MAX)
+ achan->seqnum = 1;
+ } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
+
+ txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
+
+ /* Clear data for upcoming responses */
+ rx_data = &achan->rx_data[achan->seqnum - 1];
+ memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
+ if (xfer->rxd)
+ rx_data->response = true;
+
+ /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
+ set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
+}
+
+/**
+ * acpm_wait_for_message_response() - helper grouping all possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_message_response(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ /* Just polling mode supported for now. */
+ return acpm_dequeue_by_polling(achan, xfer);
+}
+
+/**
+ * acpm_do_xfer() - do one transfer.
+ * @handle: pointer to the acpm handle.
+ * @xfer: transfer to initiate and wait for response.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct exynos_mbox_msg msg;
+ struct acpm_chan *achan;
+ u32 idx, tx_front;
+ int ret;
+
+ if (xfer->acpm_chan_id >= acpm->num_chans)
+ return -EINVAL;
+
+ achan = &acpm->chans[xfer->acpm_chan_id];
+
+ if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
+ return -EINVAL;
+
+ if (!achan->poll_completion) {
+ dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ scoped_guard(mutex, &achan->tx_lock) {
+ tx_front = readl(achan->tx.front);
+ idx = (tx_front + 1) % achan->qlen;
+
+ ret = acpm_wait_for_queue_slots(achan, idx);
+ if (ret)
+ return ret;
+
+ acpm_prepare_xfer(achan, xfer);
+
+ /* Write TX command. */
+ __iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
+ xfer->txd, xfer->txlen / 4);
+
+ /* Advance TX front. */
+ writel(idx, achan->tx.front);
+ }
+
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
+
+ ret = acpm_wait_for_message_response(achan, xfer);
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(achan->chan, ret);
+
+ return ret;
+}
+
+/**
+ * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
+ * TX/RX queues.
+ * @achan: ACPM channel info.
+ * @chan_shmem: __iomem pointer to a channel described in shared memory.
+ */
+static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
+ struct acpm_chan_shmem __iomem *chan_shmem)
+{
+ void __iomem *base = achan->acpm->sram_base;
+ struct acpm_queue *rx = &achan->rx;
+ struct acpm_queue *tx = &achan->tx;
+
+ achan->mlen = readl(&chan_shmem->mlen);
+ achan->poll_completion = readl(&chan_shmem->poll_completion);
+ achan->id = readl(&chan_shmem->id);
+ achan->qlen = readl(&chan_shmem->qlen);
+
+ tx->base = base + readl(&chan_shmem->rx_base);
+ tx->rear = base + readl(&chan_shmem->rx_rear);
+ tx->front = base + readl(&chan_shmem->rx_front);
+
+ rx->base = base + readl(&chan_shmem->tx_base);
+ rx->rear = base + readl(&chan_shmem->tx_rear);
+ rx->front = base + readl(&chan_shmem->tx_front);
+
+ dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
+ achan->id, achan->poll_completion, achan->mlen, achan->qlen);
+}
+
+/**
+ * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
+ * firmware.
+ * @achan: ACPM channel info.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
+{
+ struct device *dev = achan->acpm->dev;
+ struct acpm_rx_data *rx_data;
+ size_t cmd_size, n_cmd;
+ int i;
+
+ if (achan->mlen == 0)
+ return 0;
+
+ cmd_size = sizeof(*(achan->rx_data[0].cmd));
+ n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
+
+ for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
+ rx_data = &achan->rx_data[i];
+ rx_data->n_cmd = n_cmd;
+ rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
+ if (!rx_data->cmd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_free_mbox_chans() - free mailbox channels.
+ * @acpm: pointer to driver data.
+ */
+static void acpm_free_mbox_chans(struct acpm_info *acpm)
+{
+ int i;
+
+ for (i = 0; i < acpm->num_chans; i++)
+ if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
+ mbox_free_channel(acpm->chans[i].chan);
+}
+
+/**
+ * acpm_channels_init() - initialize channels based on the configuration data in
+ * the shared memory.
+ * @acpm: pointer to driver data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_channels_init(struct acpm_info *acpm)
+{
+ struct acpm_shmem __iomem *shmem = acpm->shmem;
+ struct acpm_chan_shmem __iomem *chans_shmem;
+ struct device *dev = acpm->dev;
+ int i, ret;
+
+ acpm->num_chans = readl(&shmem->num_chans);
+ acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
+ GFP_KERNEL);
+ if (!acpm->chans)
+ return -ENOMEM;
+
+ chans_shmem = acpm->sram_base + readl(&shmem->chans);
+
+ for (i = 0; i < acpm->num_chans; i++) {
+ struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
+ struct acpm_chan *achan = &acpm->chans[i];
+ struct mbox_client *cl = &achan->cl;
+
+ achan->acpm = acpm;
+
+ acpm_chan_shmem_get_params(achan, chan_shmem);
+
+ ret = acpm_achan_alloc_cmds(achan);
+ if (ret)
+ return ret;
+
+ mutex_init(&achan->rx_lock);
+ mutex_init(&achan->tx_lock);
+
+ cl->dev = dev;
+
+ achan->chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(achan->chan)) {
+ acpm_free_mbox_chans(acpm);
+ return PTR_ERR(achan->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_setup_ops() - setup the operations structures.
+ * @acpm: pointer to the driver data.
+ */
+static void acpm_setup_ops(struct acpm_info *acpm)
+{
+ struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+
+ pmic_ops->read_reg = acpm_pmic_read_reg;
+ pmic_ops->bulk_read = acpm_pmic_bulk_read;
+ pmic_ops->write_reg = acpm_pmic_write_reg;
+ pmic_ops->bulk_write = acpm_pmic_bulk_write;
+ pmic_ops->update_reg = acpm_pmic_update_reg;
+}
+
+static int acpm_probe(struct platform_device *pdev)
+{
+ const struct acpm_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem;
+ struct acpm_info *acpm;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
+ if (!acpm)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(dev->of_node, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get shared memory.\n");
+
+ size = resource_size(&res);
+ acpm->sram_base = devm_ioremap(dev, res.start, size);
+ if (!acpm->sram_base)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to ioremap shared memory.\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get match data.\n");
+
+ acpm->shmem = acpm->sram_base + match_data->initdata_base;
+ acpm->dev = dev;
+
+ ret = acpm_channels_init(acpm);
+ if (ret)
+ return ret;
+
+ acpm_setup_ops(acpm);
+
+ platform_set_drvdata(pdev, acpm);
+
+ return 0;
+}
+
+/**
+ * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle().
+ * @handle: handle acquired by acpm_get_by_phandle().
+ */
+static void acpm_handle_put(const struct acpm_handle *handle)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct device *dev = acpm->dev;
+
+ module_put(dev->driver->owner);
+ /* Drop reference taken with of_find_device_by_node(). */
+ put_device(dev);
+}
+
+/**
+ * devm_acpm_release() - devres release method.
+ * @dev: pointer to device.
+ * @res: pointer to resource.
+ */
+static void devm_acpm_release(struct device *dev, void *res)
+{
+ acpm_handle_put(*(struct acpm_handle **)res);
+}
+
+/**
+ * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ struct platform_device *pdev;
+ struct device_node *acpm_np;
+ struct device_link *link;
+ struct acpm_info *acpm;
+
+ acpm_np = of_parse_phandle(dev->of_node, property, 0);
+ if (!acpm_np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(acpm_np);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
+ of_node_put(acpm_np);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(acpm_np);
+
+ acpm = platform_get_drvdata(pdev);
+ if (!acpm) {
+ dev_err(dev, "Cannot get drvdata from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ dev_err(dev, "Cannot get module reference.\n");
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s.\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ module_put(pdev->dev.driver->owner);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &acpm->handle;
+}
+
+/**
+ * devm_acpm_get_by_phandle() - managed get handle using phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct acpm_handle **ptr, *handle;
+
+ ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ handle = acpm_get_by_phandle(dev, property);
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+
+static const struct acpm_match_data acpm_gs101 = {
+ .initdata_base = ACPM_GS101_INITDATA_BASE,
+};
+
+static const struct of_device_id acpm_match[] = {
+ {
+ .compatible = "google,gs101-acpm-ipc",
+ .data = &acpm_gs101,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, acpm_match);
+
+static struct platform_driver acpm_driver = {
+ .probe = acpm_probe,
+ .driver = {
+ .name = "exynos-acpm-protocol",
+ .of_match_table = acpm_match,
+ },
+};
+module_platform_driver(acpm_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
+MODULE_LICENSE("GPL");
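
The TX path above hands out sequence numbers 1..63 and tracks in-flight messages in bitmap_seqnum, with bit (seqnum - 1) set until the matching response is drained from the RX queue. A self-contained sketch of that allocation scheme (plain C, single-threaded, no locking — an illustration rather than the driver's code) is:

#include <stdio.h>

#define SEQNUM_MAX	64

/* Bit (seqnum - 1) is set while that message is still in flight. */
static unsigned long long bitmap;
static unsigned int seqnum;

/* Pick the next free sequence number in 1..SEQNUM_MAX-1, skipping busy ones. */
static unsigned int alloc_seqnum(void)
{
	do {
		if (++seqnum == SEQNUM_MAX)
			seqnum = 1;
	} while (bitmap & (1ULL << (seqnum - 1)));

	bitmap |= 1ULL << (seqnum - 1);
	return seqnum;
}

/* Called once the matching response has been drained from the RX queue. */
static void complete_seqnum(unsigned int s)
{
	bitmap &= ~(1ULL << (s - 1));
}

int main(void)
{
	unsigned int a = alloc_seqnum();	/* 1 */
	unsigned int b = alloc_seqnum();	/* 2 */

	complete_seqnum(a);
	/* Prints "a=1 b=2 next=3": freed numbers are not immediately reused. */
	printf("a=%u b=%u next=%u\n", a, b, alloc_seqnum());
	return 0;
}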
diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h
new file mode 100644
index 000000000000..2d14cb58f98c
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_H__
+#define __EXYNOS_ACPM_H__
+
+struct acpm_xfer {
+ const u32 *txd;
+ u32 *rxd;
+ size_t txlen;
+ size_t rxlen;
+ unsigned int acpm_chan_id;
+};
+
+struct acpm_handle;
+
+int acpm_do_xfer(const struct acpm_handle *handle,
+ const struct acpm_xfer *xfer);
+
+#endif /* __EXYNOS_ACPM_H__ */
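
A consumer is expected to go through the acpm_handle rather than call acpm_do_xfer() directly. The sketch below is hypothetical: it assumes devm_acpm_get_by_phandle() and the acpm_pmic_ops layout are exposed through include/linux/firmware/samsung/exynos-acpm-protocol.h, and the property name, channel id and register addresses are made up for illustration.

/* Hypothetical consumer sketch; names, channel id and addresses are made up. */
#include <linux/err.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_pmic_probe(struct platform_device *pdev)
{
	const struct acpm_handle *acpm;
	u8 val;
	int ret;

	/* "exynos,acpm-ipc" is an assumed phandle property name. */
	acpm = devm_acpm_get_by_phandle(&pdev->dev, "exynos,acpm-ipc");
	if (IS_ERR(acpm))
		return dev_err_probe(&pdev->dev, PTR_ERR(acpm),
				     "Failed to get ACPM handle\n");

	/* Read one PMIC register over ACPM channel 2 (illustrative values). */
	ret = acpm->ops.pmic_ops.read_reg(acpm, 2, 0x1, 0x23, 0x0, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "PMIC reg 0x23 = 0x%02x\n", val);
	return 0;
}

static struct platform_driver demo_pmic_driver = {
	.probe = demo_pmic_probe,
	.driver = { .name = "demo-acpm-pmic" },
};
module_platform_driver(demo_pmic_driver);

MODULE_DESCRIPTION("Illustrative ACPM consumer");
MODULE_LICENSE("GPL");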
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 1990263fbba0..c24b3fca1cfe 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
+#ifdef CONFIG_ARM64
+
+static char __ro_after_init smccc_soc_id_name[136] = "";
+
+static inline void str_fragment_from_reg(char *dst, unsigned long reg)
+{
+ dst[0] = (reg >> 0) & 0xff;
+ dst[1] = (reg >> 8) & 0xff;
+ dst[2] = (reg >> 16) & 0xff;
+ dst[3] = (reg >> 24) & 0xff;
+ dst[4] = (reg >> 32) & 0xff;
+ dst[5] = (reg >> 40) & 0xff;
+ dst[6] = (reg >> 48) & 0xff;
+ dst[7] = (reg >> 56) & 0xff;
+}
+
+static char __init *smccc_soc_name_init(void)
+{
+ struct arm_smccc_1_2_regs args;
+ struct arm_smccc_1_2_regs res;
+ size_t len;
+
+ /*
+ * Issue Number 1.6 of the Arm SMC Calling Convention
+ * specification introduces an optional "name" string
+ * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
+ * available.
+ */
+ args.a0 = ARM_SMCCC_ARCH_SOC_ID;
+ args.a1 = 2; /* SOC_ID name */
+ arm_smccc_1_2_invoke(&args, &res);
+
+ if ((u32)res.a0 == 0) {
+ /*
+ * Copy res.a1..res.a17 to the smccc_soc_id_name string
+ * 8 bytes at a time. As per Issue 1.6 of the Arm SMC
+ * Calling Convention, the string will be NUL terminated
+ * and padded, from the end of the string to the end of the
+ * 136 byte buffer, with NULs.
+ */
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
+
+ len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
+ if (len) {
+ if (len == sizeof(smccc_soc_id_name))
+ pr_warn(FW_BUG "Ignoring improperly formatted name\n");
+ else
+ return smccc_soc_id_name;
+ }
+ }
+
+ return NULL;
+}
+
+#else
+
+static char __init *smccc_soc_name_init(void)
+{
+ return NULL;
+}
+
+#endif
+
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
+ soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
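
Each SMCCC SOC_ID name register carries eight ASCII characters, least-significant byte first, which str_fragment_from_reg() copies into the 136-byte buffer. A small userspace sketch of that unpacking, using a made-up register value, shows the byte order:

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of str_fragment_from_reg(): lowest byte comes first. */
static void str_fragment_from_reg(char *dst, uint64_t reg)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		dst[i] = (reg >> (8 * i)) & 0xff;
}

int main(void)
{
	/* Made-up value for res.a1: its bytes spell "QuartzPr", LSB first. */
	uint64_t a1 = 0x72507a7472617551ULL;
	char name[9] = "";

	str_fragment_from_reg(name, a1);
	name[8] = '\0';
	printf("%s\n", name);	/* prints "QuartzPr" */
	return 0;
}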
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 720fa8b5d8e9..7356e860e65c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index 379850c83e51..d239f1e3c456 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -323,7 +323,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l
{
struct drm_log *dlog = client_to_drm_log(client);
- console_stop(&dlog->con);
+ console_suspend(&dlog->con);
return 0;
}
@@ -332,7 +332,7 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo
{
struct drm_log *dlog = client_to_drm_log(client);
- console_start(&dlog->con);
+ console_resume(&dlog->con);
return 0;
}
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index cb2ad12bce57..385eb5e10047 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -5,6 +5,8 @@
*/
#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/types.h>
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index cd789fa51519..0a25536a5d07 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -26,8 +26,6 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
-/* Used for s4 chips */
-#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -72,6 +70,7 @@ struct meson_gpio_irq_params {
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
+ unsigned int edge_pol_reg;
unsigned int pol_low_offset;
unsigned int pin_sel_mask;
struct irq_ctl_ops ops;
@@ -105,6 +104,18 @@ struct meson_gpio_irq_params {
.pin_sel_mask = 0x7f, \
.nr_channels = 8, \
+#define INIT_MESON_A4_AO_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .edge_pol_reg = 0x8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 2, \
+
#define INIT_MESON_S4_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
meson_a1_gpio_irq_sel_pin, \
@@ -112,6 +123,7 @@ struct meson_gpio_irq_params {
.support_edge_both = true, \
.edge_both_offset = 0, \
.edge_single_offset = 12, \
+ .edge_pol_reg = 0x1c, \
.pol_low_offset = 0, \
.pin_sel_mask = 0xff, \
.nr_channels = 12, \
@@ -146,6 +158,18 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
+static const struct meson_gpio_irq_params a4_params = {
+ INIT_MESON_S4_COMMON_DATA(81)
+};
+
+static const struct meson_gpio_irq_params a4_ao_params = {
+ INIT_MESON_A4_AO_COMMON_DATA(8)
+};
+
+static const struct meson_gpio_irq_params a5_params = {
+ INIT_MESON_S4_COMMON_DATA(99)
+};
+
static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
@@ -168,6 +192,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
+ { .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
+ { .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
+ { .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -299,11 +326,10 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
- const struct meson_gpio_irq_params *params;
+ u32 val = 0;
- params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -356,19 +382,19 @@ static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
+ u32 val = 0;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
type &= IRQ_TYPE_SENSE_MASK;
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, BIT(idx), 0);
if (type == IRQ_TYPE_EDGE_BOTH) {
- val |= BIT(ctl->params->edge_both_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
- BIT(ctl->params->edge_both_offset + idx), val);
+ val = BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, val, val);
return 0;
}
@@ -378,7 +404,7 @@ static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
val |= BIT(ctl->params->edge_single_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg,
BIT(idx) | BIT(12 + idx), val);
return 0;
};
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 5710348f72f6..a8f5467d6b31 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -332,6 +332,38 @@ static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[25] = {0x01},
};
+static const u8 mtk_smi_larb_mt8192_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x2, 0x2, 0x28, 0xa, 0xc, 0x28,},
+ [1] = {0x2, 0x2, 0x18, 0x18, 0x18, 0xa, 0xc, 0x28,},
+ [2] = {0x5, 0x5, 0x5, 0x5, 0x1,},
+ [3] = {},
+ [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1,},
+ [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16,},
+ [6] = {},
+ [7] = {0x1, 0x3, 0x2, 0x1, 0x1, 0x5, 0x2, 0x12, 0x13, 0x4, 0x4, 0x1,
+ 0x4, 0x2, 0x1,},
+ [8] = {},
+ [9] = {0xa, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0xa, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1,},
+ [10] = {},
+ [11] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
+ 0xf, 0x8, 0x1, 0x1, 0x1,},
+ [12] = {},
+ [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x28,
+ 0x2, 0xc, 0xc, 0x28, 0x12, 0x6,},
+ [14] = {},
+ [15] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [18] = {0x2, 0x2, 0x4, 0x2,},
+ [19] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1,},
+};
+
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -427,6 +459,7 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
+ .ostd = mtk_smi_larb_mt8192_ostd,
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 9b7d30a21a5b..44ac55feacd3 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -1191,10 +1191,8 @@ static int tegra_emc_probe(struct platform_device *pdev)
int irq, err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "please update your device tree\n");
+ if (irq < 0)
return irq;
- }
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
if (!emc)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3c383bce4928..57bd49eea777 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_write_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Mark the descriptor as activated, ready-to-use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turing off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. if this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers;
- * if not, it's business as usual later on.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address, that is used to set the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know, what the broadcast hash value is: it's xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set low-watermark 3/4ths of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine need only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called either by the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associated with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
- * -EOPNOTSUPP is returned if an unknown ioctl was requested
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt ++;
- tot ++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware will run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs, it will need
- * to "catch up" to where the hardware chain pointer is at.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i=0; i<chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i=0; i<chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, i.e. either from the interrupt bottom half or
- * from the NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or ring must be empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
- /* The cases in which we throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * returns 0 if no more packets are available to the driver/stack. Returns 1
- * if the quota is exceeded but the driver still has packets pending.
- *
- * spider_net_poll returns all packets from the rx descriptors to the stack
- * (using netif_receive_skb). If all/enough packets are up, the driver
- * reenables interrupts and returns 0. If not, 1 is returned.
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
- /* if all packets are in the stack, enable interrupts and return 0 */
- /* if not, return 1 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
-
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, <0 on failure (-EADDRNOTAVAIL if the given address
- * is not a valid ethernet address).
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
-
-/**
- * spider_net_link_reset
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. For the blade this is
- * not connected so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq handles or ignores all error conditions
- * found when an interrupt is presented
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
- /* let error_reg1 and error_reg2 evaluation decide, what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
- /* TX RAM full may happen in normal operation.
- * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* all right, tx from the previous descr is ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
- /* there is nothing we can (or want to) do at this time. Log a
- * message; we can switch the specific values on and off later on
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * returns IRQ_HANDLED if the interrupt was for the driver, or IRQ_NONE if
- * no interrupt raised by the card was found.
- *
- * This is the interrupt handler that turns off
- * interrupts for this device and makes the stack poll the driver
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints ++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables several interrupts
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- /* then set, what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter. It assumes the length etc. to be correct.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware opens the sequencer firmware and does some basic
- * checks. This function opens and releases the firmware structure. A call
- * to download the firmware is performed before the release.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
-
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
-
-/**
- * spider_net_link_phy
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
- /* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the phy again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
- /* copper, and fiber with and without autoneg, all failed;
- * retry from the beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (to be called outside of interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as task when tx hangs, resets interface (if interface is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * called if tx hangs. Schedules a task that resets the interface
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
-
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and together with the
- * functions called in spider_net_open configures the device so that
- * data can be transferred over it.
- * The net_device structure is attached to the card structure if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- * spider_net_remove is called to remove the device and unregister the
- * net_device
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear them (don't use it) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: ETOMOD signal dont matter, not connected */
-/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
-
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
-
-/* the cases we don't pass the packet to the stack.
- * 701b8000 would be correct, but every packets gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 368f6d894bba..6295e55ef85e 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- scoped_guard (msi_descs_lock, &ntb->pdev->dev) {
- desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
- addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
- }
+ msi_lock_descs(&ntb->pdev->dev);
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
+ addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ msi_unlock_descs(&ntb->pdev->dev);
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
if (!ntb->msi)
return -EINVAL;
- guard(msi_descs_lock)(dev);
+ msi_lock_descs(dev);
msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -307,11 +307,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- return ret;
+ goto unlock;
}
- return entry->irq;
+
+ ret = entry->irq;
+ goto unlock;
}
- return -ENODEV;
+ ret = -ENODEV;
+
+unlock:
+ msi_unlock_descs(dev);
+ return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
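
For reference, these ntb and PCI/MSI hunks replace the scope-based guard(msi_descs_lock)()/scoped_guard() form with explicit msi_lock_descs()/msi_unlock_descs() pairs, which is why the early returns become gotos that fall through the unlock. A minimal sketch of that discipline, with a made-up helper name purely for illustration:

#include <linux/device.h>
#include <linux/msi.h>

/* Illustration only: return the first assigned MSI Linux IRQ, or -ENODEV. */
static int example_first_assigned_irq(struct device *dev)
{
        struct msi_desc *desc;
        int ret = -ENODEV;

        msi_lock_descs(dev);
        desc = msi_first_desc(dev, MSI_DESC_ASSOCIATED);
        if (desc)
                ret = desc->irq;
        msi_unlock_descs(dev);

        return ret;
}
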
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 57e863ecde58..b1fddfa33ab9 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -221,7 +221,7 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
return APPLE_ANS_MAX_QUEUE_DEPTH;
}
-static void apple_nvme_rtkit_crashed(void *cookie)
+static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
struct apple_nvme *anv = cookie;
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 178da6b9fc33..44d7f4339306 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3975,18 +3975,24 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
+ int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- guard(msi_descs_lock)(&pdev->dev);
+ msi_lock_descs(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data))
- return -EINVAL;
+ if (WARN_ON_ONCE(!irq_data)) {
+ ret = -EINVAL;
+ break;
+ }
+
hv_compose_msi_msg(irq_data, &entry->msg);
}
- return 0;
+ msi_unlock_descs(&pdev->dev);
+
+ return ret;
}
/*
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index d89f491afdf0..b956ce591f96 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,9 +53,10 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- guard(msi_descs_lock)(&dev->dev);
+ msi_lock_descs(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -195,9 +196,10 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- guard(msi_descs_lock)(&dev->dev);
+ msi_lock_descs(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 7058d59e7c5f..6569ba3577fe 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -335,11 +335,41 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
{
- int ret = msi_setup_msi_desc(dev, nvec, masks);
+ struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry, desc;
+ int ret;
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+ dev->msi_enabled = 1;
+
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+
+ msi_lock_descs(&dev->dev);
+ ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
goto fail;
@@ -368,48 +398,19 @@ static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affin
pcibios_free_irq(dev);
dev->irq = entry->irq;
- return 0;
+ goto unlock;
+
err:
pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
+unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
return ret;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
-{
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
-
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- struct irq_affinity_desc *masks __free(kfree) =
- affd ? irq_create_affinity_masks(nvec, affd) : NULL;
-
- guard(msi_descs_lock)(&dev->dev);
- return __msi_capability_init(dev, nvec, masks);
-}
-
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -662,41 +663,40 @@ static void msix_mask_all(void __iomem *base, int tsize)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity_desc *masks)
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
{
- int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+ struct irq_affinity_desc *masks = NULL;
+ int ret;
+
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+ msi_lock_descs(&dev->dev);
+ ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto fail;
+ goto out_free;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto fail;
+ goto out_free;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto fail;
+ goto out_free;
msix_update_entries(dev, entries);
- return 0;
+ goto out_unlock;
-fail:
+out_free:
pci_free_msi_irqs(dev);
+out_unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
return ret;
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
-{
- struct irq_affinity_desc *masks __free(kfree) =
- affd ? irq_create_affinity_masks(nvec, affd) : NULL;
-
- guard(msi_descs_lock)(&dev->dev);
- return __msix_setup_interrupts(dev, entries, nvec, masks);
-}
-
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -870,13 +870,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- scoped_guard (msi_descs_lock, &dev->dev) {
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
- }
+ msi_lock_descs(&dev->dev);
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
}
+ msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -915,53 +915,6 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
-#ifdef CONFIG_PCIE_TPH
-/**
- * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
- * @pdev: The PCIe device to update
- * @index: The MSI-X index to update
- * @tag: The tag to write
- *
- * Returns: 0 on success, error code on failure
- */
-int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
-{
- struct msi_desc *msi_desc;
- struct irq_desc *irq_desc;
- unsigned int virq;
-
- if (!pdev->msix_enabled)
- return -ENXIO;
-
- guard(msi_descs_lock)(&pdev->dev);
- virq = msi_get_virq(&pdev->dev, index);
- if (!virq)
- return -ENXIO;
- /*
- * This is a horrible hack, but short of implementing a PCI
- * specific interrupt chip callback and a huge pile of
- * infrastructure, this is the minor nuissance. It provides the
- * protection against concurrent operations on this entry and keeps
- * the control word cache in sync.
- */
- irq_desc = irq_to_desc(virq);
- if (!irq_desc)
- return -ENXIO;
-
- guard(raw_spinlock_irq)(&irq_desc->lock);
- msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
- if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
- return -ENXIO;
-
- msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
- msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
- pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
- /* Flush the write */
- readl(pci_msix_desc_addr(msi_desc));
- return 0;
-}
-#endif
-
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2e9cf26a9ee9..01e51db8d285 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -989,15 +989,6 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
const char *name);
void pcim_release_region(struct pci_dev *pdev, int bar);
-#ifdef CONFIG_PCI_MSI
-int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
-#else
-static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
-{
- return -ENODEV;
-}
-#endif
-
/*
* Config Address for PCI Configuration Mechanism #1
*
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 77fce5e1b830..07de59ca2ebf 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -204,6 +204,48 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
}
+/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
+static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
+{
+#ifdef CONFIG_PCI_MSI
+ struct msi_desc *msi_desc = NULL;
+ void __iomem *vec_ctrl;
+ u32 val;
+ int err = 0;
+
+ msi_lock_descs(&pdev->dev);
+
+ /* Find the msi_desc entry with matching msix_idx */
+ msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
+ if (msi_desc->msi_index == msix_idx)
+ break;
+ }
+
+ if (!msi_desc) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
+ /* Get the vector control register (offset 0xc) pointed by msix_idx */
+ vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
+ vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ val = readl(vec_ctrl);
+ val &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ writel(val, vec_ctrl);
+
+ /* Read back to flush the update */
+ val = readl(vec_ctrl);
+
+err_out:
+ msi_unlock_descs(&pdev->dev);
+ return err;
+#else
+ return -ENODEV;
+#endif
+}
+
/* Write tag to ST table - Return 0 if OK, otherwise -errno */
static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
{
@@ -304,7 +346,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
switch (loc) {
case PCI_TPH_LOC_MSIX:
- err = pci_msix_write_tph_tag(pdev, index, tag);
+ err = write_tag_to_msix(pdev, index, tag);
break;
case PCI_TPH_LOC_CAP:
err = write_tag_to_st_table(pdev, index, tag);
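
The write_tag_to_msix() helper above is an ordinary MMIO read-modify-write, with the final readl() serving only to flush the posted write. A stripped-down sketch of the same idiom follows; the register layout and field are hypothetical, not taken from the PCI spec or this patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

/* Hypothetical field used only for illustration */
#define EXAMPLE_TAG_FIELD       GENMASK(31, 24)

static void example_update_tag(void __iomem *reg, u8 tag)
{
        u32 val = readl(reg);

        val &= ~EXAMPLE_TAG_FIELD;
        val |= FIELD_PREP(EXAMPLE_TAG_FIELD, tag);
        writel(val, reg);

        /* read back so the posted write reaches the device before we return */
        readl(reg);
}
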
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 5b3abb6db248..99f6f9784e68 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -96,6 +96,13 @@ config RESET_HSDK
help
This enables the reset controller driver for HSDK board.
+config RESET_IMX_SCU
+ tristate "i.MX8Q Reset Driver"
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ depends on (ARM64 && ARCH_MXC) || COMPILE_TEST
+ help
+ This enables the reset controller driver for i.MX8QM/i.MX8QXP
+
config RESET_IMX7
tristate "i.MX7/8 Reset Driver"
depends on HAS_IOMEM
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 677c4d1e2632..31f9904d13f9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
+obj-$(CONFIG_RESET_IMX_SCU) += reset-imx-scu.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
diff --git a/drivers/reset/reset-imx-scu.c b/drivers/reset/reset-imx-scu.c
new file mode 100644
index 000000000000..919fc29f944c
--- /dev/null
+++ b/drivers/reset/reset-imx-scu.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ */
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+struct imx_scu_reset {
+ struct reset_controller_dev rc;
+ struct imx_sc_ipc *ipc_handle;
+};
+
+static struct imx_scu_reset *to_imx_scu(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct imx_scu_reset, rc);
+}
+
+struct imx_scu_id_map {
+ u32 resource_id;
+ u32 command_id;
+};
+
+static const struct imx_scu_id_map imx_scu_id_map[] = {
+ { IMX_SC_R_CSI_0, IMX_SC_C_MIPI_RESET },
+ { IMX_SC_R_CSI_1, IMX_SC_C_MIPI_RESET },
+};
+
+static int imx_scu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct imx_scu_reset *priv = to_imx_scu(rc);
+
+ return imx_sc_misc_set_control(priv->ipc_handle, imx_scu_id_map[id].resource_id,
+ imx_scu_id_map[id].command_id, true);
+}
+
+static const struct reset_control_ops imx_scu_reset_ops = {
+ .assert = imx_scu_reset_assert,
+};
+
+static int imx_scu_xlate(struct reset_controller_dev *rc, const struct of_phandle_args *reset_spec)
+{
+ int i;
+
+ for (i = 0; i < rc->nr_resets; i++)
+ if (reset_spec->args[0] == imx_scu_id_map[i].resource_id)
+ return i;
+
+ return -EINVAL;
+}
+
+static int imx_scu_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_scu_reset *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->rc);
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return dev_err_probe(dev, ret, "sc_misc_MIPI get ipc handle failed!\n");
+
+ priv->rc.ops = &imx_scu_reset_ops;
+ priv->rc.owner = THIS_MODULE;
+ priv->rc.of_node = dev->of_node;
+ priv->rc.of_reset_n_cells = 1;
+ priv->rc.of_xlate = imx_scu_xlate;
+ priv->rc.nr_resets = ARRAY_SIZE(imx_scu_id_map);
+
+ return devm_reset_controller_register(dev, &priv->rc);
+}
+
+static const struct of_device_id imx_scu_reset_ids[] = {
+ { .compatible = "fsl,imx-scu-reset", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_scu_reset_ids);
+
+static struct platform_driver imx_scu_reset_driver = {
+ .probe = imx_scu_reset_probe,
+ .driver = {
+ .name = "scu-reset",
+ .of_match_table = imx_scu_reset_ids,
+ },
+};
+module_platform_driver(imx_scu_reset_driver);
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("i.MX scu reset driver");
+MODULE_LICENSE("GPL");
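
The new controller only implements .assert, and imx_scu_xlate() maps a one-cell phandle argument (the SCU resource ID, e.g. IMX_SC_R_CSI_0) onto a table index. A sketch of a consumer is below; the consumer driver and its devicetree binding are assumptions for illustration, not part of this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer: assert the MIPI/CSI reset provided by the SCU. */
static int example_assert_csi_reset(struct device *dev)
{
        struct reset_control *rstc;

        rstc = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(rstc))
                return PTR_ERR(rstc);

        /* only .assert is wired up; it issues IMX_SC_C_MIPI_RESET */
        return reset_control_assert(rstc);
}
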
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index fd72d9088bdc..64ed7d64458a 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -26,6 +26,19 @@ static unsigned int sh_clk_read(struct clk *clk)
return ioread32(clk->mapped_reg);
}
+static unsigned int sh_clk_read_status(struct clk *clk)
+{
+ void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+ (phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ return ioread8(mapped_status);
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ return ioread16(mapped_status);
+
+ return ioread32(mapped_status);
+}
+
static void sh_clk_write(int value, struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
@@ -40,20 +53,10 @@ static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
if (clk->status_reg) {
- unsigned int (*read)(const void __iomem *addr);
int i;
- void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
- (phys_addr_t)clk->enable_reg + clk->mapped_reg;
-
- if (clk->flags & CLK_ENABLE_REG_8BIT)
- read = ioread8;
- else if (clk->flags & CLK_ENABLE_REG_16BIT)
- read = ioread16;
- else
- read = ioread32;
for (i = 1000;
- (read(mapped_status) & (1 << clk->enable_bit)) && i;
+ (sh_clk_read_status(clk) & (1 << clk->enable_bit)) && i;
i--)
cpu_relax();
if (!i) {
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 27c9fa745fd5..b8d5244678f0 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
+ struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index e6d940292c9f..5fffd0f003dc 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -12,6 +12,7 @@ enum {
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+ APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -66,8 +67,9 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT 1
-#define APPLE_RTKIT_OSLOG_ACK 3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -97,12 +99,19 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
+ int ret;
+
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+ ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+ if (ret)
+ dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+ return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -251,15 +260,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+ } else {
+ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ }
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
- buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
@@ -284,17 +299,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
- APPLE_RTKIT_BUFFER_REQUEST);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
- buffer->iova);
+ /* oslog uses different fields and needs a shifted IOVA instead of size */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+ APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+ buffer->iova >> 12);
+ } else {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+ buffer->size >> 12);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ }
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
+ dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+ buffer->size, err);
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
@@ -360,7 +388,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
- kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -368,7 +395,9 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
rtk->crashed = true;
if (rtk->ops->crashed)
- rtk->ops->crashed(rtk->cookie);
+ rtk->ops->crashed(rtk->cookie, bfr, rtk->crashlog_buffer.size);
+
+ kfree(bfr);
}
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
@@ -448,7 +477,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
log_context[sizeof(log_context) - 1] = 0;
- msglen = rtk->syslog_msg_size - 1;
+ msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
@@ -482,25 +511,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
- u64 ack;
-
- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
- case APPLE_RTKIT_OSLOG_INIT:
- apple_rtkit_oslog_rx_init(rtk, msg);
+ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+ APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+ msg);
}
}
@@ -588,11 +610,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
.msg1 = ep,
};
- if (rtk->crashed)
+ if (rtk->crashed) {
+ dev_warn(rtk->dev,
+ "RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
+ }
+
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
- !apple_rtkit_is_running(rtk))
+ !apple_rtkit_is_running(rtk)) {
+ dev_warn(rtk->dev,
+ "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
+ }
/*
* The message will be sent with a MMIO write. We need the barrier
@@ -667,7 +696,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox->rx = apple_rtkit_rx;
rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
@@ -710,6 +739,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -742,8 +772,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -763,8 +795,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -865,6 +899,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
+ int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -875,9 +910,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
- msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
return apple_rtkit_boot(rtk);
}
@@ -890,6 +927,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
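
Together with the apple-nvme hunk earlier, the rtkit changes give the ->crashed() callback a new prototype that hands the raw crashlog (possibly NULL if the shadow buffer allocation failed) to the client. A sketch of a client adapting to it; the client structure and its logging are hypothetical:

#include <linux/device.h>
#include <linux/soc/apple/rtkit.h>

struct example_client {
        struct device *dev;
};

static void example_rtkit_crashed(void *cookie, const void *crashlog,
                                  size_t crashlog_size)
{
        struct example_client *cl = cookie;

        /* crashlog may be NULL when the shadow buffer could not be allocated */
        dev_warn(cl->dev, "coprocessor crashed, crashlog of %zu bytes\n",
                 crashlog_size);
}

static const struct apple_rtkit_ops example_rtkit_ops = {
        .crashed = example_rtkit_crashed,
};
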
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 298b542dd1c0..09347bccdb1d 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -246,6 +246,9 @@ static const struct at91_soc socs[] __initconst = {
"samv70q19", "samv7"),
#endif
#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7D65_CIDR_MATCH, AT91_CIDR_MASK_SAMA7G5,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7D65_EXID_MATCH,
+ "sama7d65", "sama7d6"),
AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
"sama7g51", "sama7g5"),
@@ -305,6 +308,7 @@ static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
void __iomem *regs;
static const struct of_device_id chipids[] = {
{ .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7d65-chipid" },
{ .compatible = "microchip,sama7g5-chipid" },
{ },
};
@@ -393,6 +397,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
{ .compatible = "atmel,at91sam9", },
{ .compatible = "atmel,sama5", },
{ .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7d65", },
{ .compatible = "microchip,sama7g5", },
{ }
};
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 2c78e54255f7..66a74017d9a3 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -45,6 +45,7 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
#define SAM9X7_CIDR_MATCH 0x09750020
+#define SAMA7D65_CIDR_MATCH 0x00262100
#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
@@ -75,6 +76,8 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X75_D5M_EXID_MATCH 0x00000010
#define SAM9X75_EXID_MATCH 0x00000000
+#define SAMA7D65_EXID_MATCH 0x00000080
+
#define SAMA7G51_EXID_MATCH 0x3
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
index f7a35b3656bb..c468926561b4 100644
--- a/drivers/soc/mediatek/mt8167-mmsys.h
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -14,22 +14,21 @@
#define MT8167_DSI0_SEL_IN_RDMA0 0x1
static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
- MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
- },
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(DITHER0, RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0,
+ MT8167_DITHER_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+ COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0,
+ MT8167_DSI0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0,
+ MT8167_RDMA0_SOUT_DSI0),
};
#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
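
Every MediaTek mmsys table change in this series, including the mt8167 one above, is the same mechanical conversion from open-coded positional route entries to MMSYS_ROUTE() invocations. The macro itself is added to mtk-mmsys.h, which is not part of this excerpt, so the expansion below is an assumption inferred from the one-to-one rewrite, not the authoritative definition:

/*
 * Assumed expansion: the first two arguments gain the DDP_COMPONENT_
 * prefix, the remaining three stay the register address, mask and
 * routed value.
 */
#define MMSYS_ROUTE(from, to, reg, msk, value)                          \
        { DDP_COMPONENT_##from, DDP_COMPONENT_##to, (reg), (msk), (value) }
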
diff --git a/drivers/soc/mediatek/mt8173-mmsys.h b/drivers/soc/mediatek/mt8173-mmsys.h
index 9d24e381271e..957876d7c166 100644
--- a/drivers/soc/mediatek/mt8173-mmsys.h
+++ b/drivers/soc/mediatek/mt8173-mmsys.h
@@ -33,63 +33,48 @@
#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
- }, {
- DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
- MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
- MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
- MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
- }, {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
- MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
- MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
- MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
- MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
- MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
- MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
- }, {
- DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
- MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
- MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
- RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
- COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
- MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
- }
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, MT8173_OVL0_MOUT_EN_COLOR0,
+ MT8173_OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(OD0, RDMA0,
+ MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN, MT8173_OD0_MOUT_EN_RDMA0,
+ MT8173_OD0_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, MT8173_UFOE_MOUT_EN_DSI0,
+ MT8173_UFOE_MOUT_EN_DSI0),
+ MMSYS_ROUTE(COLOR0, AAL0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN, MT8173_COLOR0_SOUT_MERGE,
+ 0 /* SOUT to AAL */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8173_RDMA0_SOUT_COLOR0,
+ 0 /* SOUT to UFOE */),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, MT8173_COLOR0_SEL_IN_OVL0,
+ MT8173_COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(AAL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN, MT8173_AAL_SEL_IN_MERGE,
+ 0 /* SEL_IN from COLOR0 */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN, MT8173_UFOE_SEL_IN_RDMA0,
+ 0 /* SEL_IN from RDMA0 */),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DSI0_SEL_IN, MT8173_DSI0_SEL_IN_UFOE,
+ 0 /* SEL_IN from UFOE */),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, MT8173_OVL1_MOUT_EN_COLOR1,
+ MT8173_OVL1_MOUT_EN_COLOR1),
+ MMSYS_ROUTE(GAMMA, RDMA1,
+ MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, MT8173_GAMMA_MOUT_EN_RDMA1,
+ MT8173_GAMMA_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI0),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+ COLOR1_SEL_IN_OVL1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DPI_SEL_IN, MT8173_DPI0_SEL_IN_MASK,
+ MT8173_DPI0_SEL_IN_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index ff6be1703469..123384958c4b 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -28,35 +28,27 @@
#define MT8183_MMSYS_SW0_RST_B 0x140
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
- MT8183_OVL0_MOUT_EN_OVL0_2L
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
- MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
- }, {
- DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
- MT8183_OVL1_2L_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
- MT8183_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
- MT8183_DISP_PATH0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
- MT8183_DPI0_SEL_IN_RDMA1
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
- MT8183_RDMA0_SOUT_COLOR0
- }
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+ MT8183_OVL0_MOUT_EN_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0),
+ MMSYS_ROUTE(OVL_2L1, RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+ MT8183_OVL1_2L_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+ MT8183_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+ MT8183_DISP_PATH0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+ MT8183_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+ MT8183_RDMA0_SOUT_COLOR0),
};
#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index 279d4138525b..354664be72bd 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -63,61 +63,39 @@
#define MT8186_MMSYS_SW0_RST_B 0x160
static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
- MT8186_OVL0_MOUT_TO_RDMA0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
- MT8186_RDMA0_FROM_OVL0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
- MT8186_OVL0_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
- MT8186_RDMA0_SOUT_TO_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
- MT8186_DITHER0_MOUT_TO_DSI0,
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
- MT8186_DSI0_FROM_DITHER0
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
- MT8186_OVL0_2L_MOUT_TO_RDMA1
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
- MT8186_RDMA1_FROM_OVL0_2L
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
- MT8186_OVL0_2L_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
- MT8186_RDMA1_MOUT_TO_DPI0_SEL
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
- MT8186_DPI0_FROM_RDMA1
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
index 6bebf1a69fc0..99080afead7e 100644
--- a/drivers/soc/mediatek/mt8188-mmsys.h
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -202,158 +202,126 @@ static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
};
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
- MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_DSC_WARP_SEL,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
- MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
- MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+		    MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, GENMASK(1, 0),
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8188_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
- MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index a016d80b4bc1..7cafa2455fd0 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -31,47 +31,36 @@
#define MT8192_DSI0_SEL_IN_DITHER0 0x1
static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
- MT8192_OVL0_MOUT_EN_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
- MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
- MT8192_OVL2_2L_MOUT_EN_RDMA4
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
- MT8192_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
- MT8192_RDMA0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
- MT8192_AAL0_SEL_IN_CCORR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
- MT8192_DSI0_SEL_IN_DITHER0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
- MT8192_RDMA0_SOUT_COLOR0
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
- MT8192_CCORR0_SOUT_AAL0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
- MT8192_DISP_OVL0_GO_BG
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
- MT8192_DISP_OVL0_2L_GO_BLEND
- }
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0),
+ MMSYS_ROUTE(OVL_2L2, RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0),
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND),
};
#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
index 9be2df2832a4..f69929a2a4d4 100644
--- a/drivers/soc/mediatek/mt8195-mmsys.h
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -160,370 +160,278 @@
#define MT8195_SVPP3_MDP_RSZ BIT(5)
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
- MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
- MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSI1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
- }
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1),
+ MMSYS_ROUTE(OVL1, RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1),
+ MMSYS_ROUTE(OVL1, OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 7abaf048d91e..533a3fd0923b 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -14,8 +14,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0)
#define MT8365_RDMA0_SOUT_COLOR0 0x1
-#define MT8365_DITHER_MOUT_EN_DSI0 0x1
+#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0)
#define MT8365_DSI0_SEL_IN_DITHER 0x1
#define MT8365_RDMA0_SEL_IN_OVL0 0x0
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
@@ -27,56 +28,37 @@
#define MT8365_DPI0_SEL_IN_RDMA1 0x0
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
- MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
- MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
- },
- {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
- MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
- MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
- MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
- MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
- MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
- MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
- MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(COLOR0, CCORR,
+ MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0),
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index d370192737ca..fe628d5f5198 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -80,6 +80,20 @@
#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
+/*
+ * This macro adds a compile-time check to make sure that the in/out
+ * selection bit(s) fit in the register mask, similar to the bitfield
+ * macros, except that it does not transform the value.
+ */
+#define MMSYS_ROUTE(from, to, reg_addr, reg_mask, selection) \
+ { DDP_COMPONENT_##from, DDP_COMPONENT_##to, reg_addr, reg_mask, \
+ (__BUILD_BUG_ON_ZERO_MSG((reg_mask) == 0, "Invalid mask") + \
+ __BUILD_BUG_ON_ZERO_MSG(~(reg_mask) & (selection), \
+ #selection " does not fit in " \
+ #reg_mask) + \
+ (selection)) \
+ }
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
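
With the check above, a selection value carrying bits outside the register mask becomes a build failure instead of silently clobbering a neighbouring field. A hedged example with made-up defines (none of the FOO_* names exist in the SoC headers):

#define FOO_SEL_IN		0x0f0
#define FOO_SEL_IN_MASK		GENMASK(1, 0)
#define FOO_SEL_IN_FROM_RDMA0	0x1
#define FOO_BAD_SEL		BIT(2)	/* lies outside GENMASK(1, 0) */

static const struct mtk_mmsys_routes example_routes[] = {
	/* Compiles: the selection value fits entirely within the mask. */
	MMSYS_ROUTE(RDMA0, COLOR0, FOO_SEL_IN, FOO_SEL_IN_MASK, FOO_SEL_IN_FROM_RDMA0),
	/* Build error: "FOO_BAD_SEL does not fit in FOO_SEL_IN_MASK". */
	MMSYS_ROUTE(RDMA0, COLOR0, FOO_SEL_IN, FOO_SEL_IN_MASK, FOO_BAD_SEL),
};
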
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5250c1d702eb..aaa965d4b050 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -155,6 +155,7 @@
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DPI1 38
#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0
@@ -289,6 +290,7 @@
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8188_MUTEX_SOF_DP_INTF1 4
+#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -301,6 +303,7 @@
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
@@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI1] =
+ MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
[MUTEX_SOF_DP_INTF1] =
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
index 123b12cd2543..c697a0398d91 100644
--- a/drivers/soc/mediatek/mtk-socinfo.c
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -56,29 +56,39 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
+ MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
};
static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
{
struct soc_device_attribute *attrs;
- static char machine[30] = {0};
+ struct socinfo_data *data = mtk_socinfop->socinfo_data;
static const char *soc_manufacturer = "MediaTek";
attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
- snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
- mtk_socinfop->socinfo_data->soc_name);
- attrs->family = soc_manufacturer;
- attrs->machine = machine;
+ if (data->marketing_name != NULL && data->marketing_name[0] != '\0')
+ attrs->family = devm_kasprintf(mtk_socinfop->dev, GFP_KERNEL, "MediaTek %s",
+ data->marketing_name);
+ else
+ attrs->family = soc_manufacturer;
+
+ attrs->soc_id = data->soc_name;
+ /*
+	 * The "machine" field will be populated automatically with the model
+	 * name from the board DTS (if available).
+	 */
mtk_socinfop->soc_dev = soc_device_register(attrs);
if (IS_ERR(mtk_socinfop->soc_dev))
return PTR_ERR(mtk_socinfop->soc_dev);
- dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
+ dev_info(mtk_socinfop->dev, "%s (%s) SoC detected.\n", attrs->family, attrs->soc_id);
return 0;
}
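
After this change the exported attributes read, for example, family = "MediaTek Kompanio 1380" and soc_id = "MT8195TV/EZA", with machine taken from the board's DT model string. Code elsewhere can key off these via soc_device_match(); a short sketch in which the quirk handler is hypothetical:

static const struct soc_device_attribute mt8195_quirks[] = {
	{ .family = "MediaTek*", .soc_id = "MT8195*" },
	{ /* sentinel */ }
};

if (soc_device_match(mt8195_quirks))
	apply_mt8195_workaround();	/* hypothetical consumer-side helper */
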
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 393d2d1d275f..2310afa77b76 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -261,7 +262,7 @@ static struct qcom_ice *qcom_ice_create(struct device *dev,
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
-struct qcom_ice *of_qcom_ice_get(struct device *dev)
+static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
@@ -322,7 +323,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
return ice;
}
-EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, when its DT node supplies the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or by returning the already-created
+ * instance referenced through the consumer's 'qcom,ice' phandle property
+ * to a dedicated ICE DT node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
static int qcom_ice_probe(struct platform_device *pdev)
{
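
A consumer would typically call the new devres helper from probe and rely on driver detach for cleanup; a minimal sketch, where the driver skeleton and names are hypothetical and only devm_of_qcom_ice_get() comes from this patch:

static int example_host_probe(struct platform_device *pdev)
{
	struct qcom_ice *ice;

	ice = devm_of_qcom_ice_get(&pdev->dev);
	if (IS_ERR(ice))
		return PTR_ERR(ice);
	if (!ice)
		dev_dbg(&pdev->dev, "no ICE provided, inline crypto disabled\n");

	/* No explicit put: the reference is dropped automatically on unbind. */
	return 0;
}
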
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 8d17f7fb79e7..039508c1bbf7 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -91,7 +91,6 @@ struct servreg_loc_pfr_resp {
struct qmi_response_type_v01 rsp;
};
-extern const struct qmi_elem_info servreg_location_entry_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
extern const struct qmi_elem_info servreg_register_listener_req_ei[];
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 0320ad3b9148..a543ab9bee6c 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>
#define CREATE_TRACE_POINTS
@@ -358,7 +359,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
- qmp_cdev->name, cdev_state ? "on" : "off");
+ qmp_cdev->name, str_on_off(cdev_state));
if (!ret)
qmp_cdev->state = cdev_state;
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 154ca5beb471..1d1c438be3e7 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -429,6 +429,16 @@ static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
NULL,
};
+/* Unlike SDM660, SDM630/636 lack CDSP */
+static const struct qcom_pdm_domain_data *sdm630_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *sdm660_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@@ -546,6 +556,8 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
{ .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
{ .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
+ { .compatible = "qcom,sdm630", .data = sdm630_domains, },
+ { .compatible = "qcom,sdm636", .data = sdm630_domains, },
{ .compatible = "qcom,sda660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm670", .data = sdm670_domains, },
diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c
index bf3e4a47165e..ca98932140d8 100644
--- a/drivers/soc/qcom/qcom_pdr_msg.c
+++ b/drivers/soc/qcom/qcom_pdr_msg.c
@@ -8,7 +8,7 @@
#include "pdr_internal.h"
-const struct qmi_elem_info servreg_location_entry_ei[] = {
+static const struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -47,7 +47,6 @@ const struct qmi_elem_info servreg_location_entry_ei[] = {
},
{}
};
-EXPORT_SYMBOL_GPL(servreg_location_entry_ei);
const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 6d2e135eed89..49648cf28bd2 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,6 +334,7 @@ config ARCH_R9A07G054
config ARCH_R9A08G045
bool "ARM64 Platform support for RZ/G3S"
select ARCH_RZG2L
+ select SYSC_R9A08G045
help
This enables support for the Renesas RZ/G3S SoC variants.
@@ -347,12 +348,14 @@ config ARCH_R9A09G011
config ARCH_R9A09G047
bool "ARM64 Platform support for RZ/G3E"
+ select SYS_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
+ select SYS_R9A09G057
help
This enables support for the Renesas RZ/V2H(P) SoC variants.
@@ -383,4 +386,19 @@ config PWC_RZV2M
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
+config SYSC_RZ
+ bool "System controller for RZ SoCs" if COMPILE_TEST
+
+config SYSC_R9A08G045
+ bool "Renesas RZ/G3S System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G047
+ bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G057
+ bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 734f8f8cefa4..81d4c5726e4c 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -6,7 +6,11 @@ obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
+obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
+obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
+obj-$(CONFIG_SYSC_RZ) += rz-sysc.o
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
new file mode 100644
index 000000000000..f4db1431e036
--- /dev/null
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3S System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/init.h>
+
+#include "rz-sysc.h"
+
+static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
+ .family = "RZ/G3S",
+ .id = 0x85e0447,
+ .devid_offset = 0xa04,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+};
+
+const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
+ .soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
new file mode 100644
index 000000000000..cd2eb7782cfe
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3E System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_CA55_DIS BIT(8)
+#define SYS_LSI_PRR_NPU_DIS BIT(1)
+
+static void rzg3e_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool is_quad_core, npu_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check CPU and NPU configuration */
+ is_quad_core = !(prr_val & SYS_LSI_PRR_CA55_DIS);
+ npu_enabled = !(prr_val & SYS_LSI_PRR_NPU_DIS);
+
+ dev_info(dev, "Detected Renesas %s Core %s %s Rev %s%s\n",
+ is_quad_core ? "Quad" : "Dual", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision,
+ npu_enabled ? " with Ethos-U55" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initconst = {
+ .family = "RZ/G3E",
+ .id = 0x8679447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzg3e_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzg3e_sys_init_data = {
+ .soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
new file mode 100644
index 000000000000..4c21cc29edbc
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2H System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+static void rzv2h_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool gpu_enabled, isp_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU and ISP configuration */
+ gpu_enabled = !(prr_val & SYS_LSI_PRR_GPU_DIS);
+ isp_enabled = !(prr_val & SYS_LSI_PRR_ISP_DIS);
+
+ dev_info(dev, "Detected Renesas %s %s Rev %s%s%s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision,
+ gpu_enabled ? " with GE3D (Mali-G31)" : "",
+ isp_enabled ? " with ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2H",
+ .id = 0x847a447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2h_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzv2h_sys_init_data = {
+ .soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 172d59e6fbcf..df2b38417b80 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -71,14 +71,6 @@ static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
.name = "RZ/G2UL",
};
-static const struct renesas_family fam_rzg3s __initconst __maybe_unused = {
- .name = "RZ/G3S",
-};
-
-static const struct renesas_family fam_rzv2h __initconst __maybe_unused = {
- .name = "RZ/V2H",
-};
-
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -176,16 +168,6 @@ static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
.id = 0x8450447,
};
-static const struct renesas_soc soc_rz_g3s __initconst __maybe_unused = {
- .family = &fam_rzg3s,
- .id = 0x85e0447,
-};
-
-static const struct renesas_soc soc_rz_v2h __initconst __maybe_unused = {
- .family = &fam_rzv2h,
- .id = 0x847a447,
-};
-
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -289,7 +271,6 @@ static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.id = 0x37,
};
-
static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R7S72100
{ .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
@@ -410,15 +391,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R9A07G054
{ .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
#endif
-#ifdef CONFIG_ARCH_R9A08G045
- { .compatible = "renesas,r9a08g045", .data = &soc_rz_g3s },
-#endif
#ifdef CONFIG_ARCH_R9A09G011
{ .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
#endif
-#ifdef CONFIG_ARCH_R9A09G057
- { .compatible = "renesas,r9a09g057", .data = &soc_rz_v2h },
-#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -444,11 +419,6 @@ static const struct renesas_id id_rzg2l __initconst = {
.mask = 0xfffffff,
};
-static const struct renesas_id id_rzv2h __initconst = {
- .offset = 0x304,
- .mask = 0xfffffff,
-};
-
static const struct renesas_id id_rzv2m __initconst = {
.offset = 0x104,
.mask = 0xff,
@@ -466,7 +436,6 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
- { .compatible = "renesas,r9a09g057-sys", .data = &id_rzv2h },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@@ -531,7 +500,7 @@ static int __init renesas_soc_init(void)
eslo = product & 0xf;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
eshi, eslo);
- } else if (id == &id_rzg2l || id == &id_rzv2h) {
+ } else if (id == &id_rzg2l) {
eshi = ((product >> 28) & 0x0f);
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
eshi);
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
new file mode 100644
index 000000000000..14db508f669f
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+#include "rz-sysc.h"
+
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+/**
+ * struct rz_sysc - RZ SYSC private data structure
+ * @base: SYSC base address
+ * @dev: SYSC device pointer
+ */
+struct rz_sysc {
+ void __iomem *base;
+ struct device *dev;
+};
+
+static int rz_sysc_soc_init(struct rz_sysc *sysc, const struct of_device_id *match)
+{
+ const struct rz_sysc_init_data *sysc_data = match->data;
+ const struct rz_sysc_soc_id_init_data *soc_data = sysc_data->soc_id_init_data;
+ struct soc_device_attribute *soc_dev_attr;
+ const char *soc_id_start, *soc_id_end;
+ u32 val, revision, specific_id;
+ struct soc_device *soc_dev;
+ char soc_id[32] = {0};
+ size_t size;
+
+ soc_id_start = strchr(match->compatible, ',') + 1;
+ soc_id_end = strchr(match->compatible, '-');
+ size = soc_id_end - soc_id_start + 1;
+ if (size > 32)
+ size = sizeof(soc_id);
+ strscpy(soc_id, soc_id_start, size);
+
+ soc_dev_attr = devm_kzalloc(sysc->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = devm_kstrdup(sysc->dev, soc_data->family, GFP_KERNEL);
+ if (!soc_dev_attr->family)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = devm_kstrdup(sysc->dev, soc_id, GFP_KERNEL);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ val = readl(sysc->base + soc_data->devid_offset);
+ revision = field_get(soc_data->revision_mask, val);
+ specific_id = field_get(soc_data->specific_id_mask, val);
+ soc_dev_attr->revision = devm_kasprintf(sysc->dev, GFP_KERNEL, "%u", revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ if (soc_data->id && specific_id != soc_data->id) {
+ dev_warn(sysc->dev, "SoC mismatch (product = 0x%x)\n", specific_id);
+ return -ENODEV;
+ }
+
+ /* Try to call SoC-specific device identification */
+ if (soc_data->print_id) {
+ soc_data->print_id(sysc->dev, sysc->base, soc_dev_attr);
+ } else {
+ dev_info(sysc->dev, "Detected Renesas %s %s Rev %s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision);
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id rz_sysc_match[] = {
+#ifdef CONFIG_SYSC_R9A08G045
+ { .compatible = "renesas,r9a08g045-sysc", .data = &rzg3s_sysc_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G047
+ { .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G057
+ { .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, rz_sysc_match);
+
+static int rz_sysc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct rz_sysc *sysc;
+
+ match = of_match_node(rz_sysc_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+
+ sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
+ if (!sysc)
+ return -ENOMEM;
+
+ sysc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sysc->base))
+ return PTR_ERR(sysc->base);
+
+ sysc->dev = dev;
+ return rz_sysc_soc_init(sysc, match);
+}
+
+static struct platform_driver rz_sysc_driver = {
+ .driver = {
+ .name = "renesas-rz-sysc",
+ .suppress_bind_attrs = true,
+ .of_match_table = rz_sysc_match
+ },
+ .probe = rz_sysc_probe
+};
+
+static int __init rz_sysc_init(void)
+{
+ return platform_driver_register(&rz_sysc_driver);
+}
+subsys_initcall(rz_sysc_init);
+
+MODULE_DESCRIPTION("Renesas RZ System Controller Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
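The compatible-string parsing and DEVID decoding done by rz_sysc_soc_init() above can be illustrated with a small standalone sketch: the SoC ID string is the text between the vendor comma and the first dash of the matched compatible, and the revision/part number are extracted with the local field_get() helper. The userspace GENMASK/field_get stand-ins and the raw register value below are assumptions for illustration only, not driver code.

/*
 * Standalone illustration of rz_sysc_soc_init()'s compatible parsing and
 * DEVID decoding.  The raw register value is a made-up example.
 */
#include <stdio.h>
#include <string.h>

#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define field_get(mask, reg) (((reg) & (mask)) >> (__builtin_ffs(mask) - 1))

int main(void)
{
	const char *compatible = "renesas,r9a09g057-sys";
	const char *start = strchr(compatible, ',') + 1;
	const char *end = strchr(compatible, '-');
	char soc_id[32] = {0};
	unsigned int raw = 0x1847a447;	/* hypothetical DEVID register value */

	snprintf(soc_id, sizeof(soc_id), "%.*s", (int)(end - start), start);
	printf("soc_id:   %s\n", soc_id);                          /* r9a09g057 */
	printf("revision: %u\n", field_get(GENMASK(31, 28), raw)); /* 1 */
	printf("part:     0x%x\n", field_get(GENMASK(27, 0), raw)); /* 0x847a447 */
	return 0;
}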
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
new file mode 100644
index 000000000000..aa83948c5117
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ System Controller
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __SOC_RENESAS_RZ_SYSC_H__
+#define __SOC_RENESAS_RZ_SYSC_H__
+
+#include <linux/device.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+/**
+ * struct rz_sysc_soc_id_init_data - RZ SYSC SoC identification initialization data
+ * @family: RZ SoC family
+ * @id: RZ SoC expected ID
+ * @devid_offset: SYSC SoC ID register offset
+ * @revision_mask: SYSC SoC ID revision mask
+ * @specific_id_mask: SYSC SoC ID specific ID mask
+ * @print_id: print SoC-specific extended device identification
+ */
+struct rz_sysc_soc_id_init_data {
+ const char * const family;
+ u32 id;
+ u32 devid_offset;
+ u32 revision_mask;
+ u32 specific_id_mask;
+ void (*print_id)(struct device *dev, void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr);
+};
+
+/**
+ * struct rz_sysc_init_data - RZ SYSC initialization data
+ * @soc_id_init_data: RZ SYSC SoC ID initialization data
+ */
+struct rz_sysc_init_data {
+ const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+};
+
+extern const struct rz_sysc_init_data rzg3e_sys_init_data;
+extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
+extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+
+#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 97006cc3b946..8e681f519526 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -9,6 +9,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
+#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/energy_model.h>
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index e37dde1fb588..c86f1058ceed 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
@@ -55,7 +56,9 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS7870", 0xE7870000 },
/* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOS8895", 0xE8895000 },
@@ -134,6 +137,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%x", soc_info.revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index dd5256e5aae1..c40313886a01 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index 114352695ac2..c5661ac19f7b 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -16,6 +17,18 @@
#include <dt-bindings/soc/samsung,exynos-usi.h>
+/* USIv1: System Register: SW_CONF register bits */
+#define USI_V1_SW_CONF_NONE 0x0
+#define USI_V1_SW_CONF_I2C0 0x1
+#define USI_V1_SW_CONF_I2C1 0x2
+#define USI_V1_SW_CONF_I2C0_1 0x3
+#define USI_V1_SW_CONF_SPI 0x4
+#define USI_V1_SW_CONF_UART 0x8
+#define USI_V1_SW_CONF_UART_I2C1 0xa
+#define USI_V1_SW_CONF_MASK (USI_V1_SW_CONF_I2C0 | USI_V1_SW_CONF_I2C1 | \
+ USI_V1_SW_CONF_I2C0_1 | USI_V1_SW_CONF_SPI | \
+ USI_V1_SW_CONF_UART | USI_V1_SW_CONF_UART_I2C1)
+
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
@@ -34,7 +47,8 @@
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
- USI_VER2 = 2,
+ USI_VER1 = 0,
+ USI_VER2,
};
struct exynos_usi_variant {
@@ -66,19 +80,39 @@ struct exynos_usi_mode {
unsigned int val; /* mode register value */
};
-static const struct exynos_usi_mode exynos_usi_modes[] = {
- [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
- [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
- [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
- [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+#define USI_MODES_MAX (USI_MODE_UART_I2C1 + 1)
+static const struct exynos_usi_mode exynos_usi_modes[][USI_MODES_MAX] = {
+ [USI_VER1] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V1_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V1_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V1_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V1_SW_CONF_I2C0 },
+ [USI_MODE_I2C1] = { .name = "i2c1", .val = USI_V1_SW_CONF_I2C1 },
+ [USI_MODE_I2C0_1] = { .name = "i2c0_1", .val = USI_V1_SW_CONF_I2C0_1 },
+ [USI_MODE_UART_I2C1] = { .name = "uart_i2c1", .val = USI_V1_SW_CONF_UART_I2C1 },
+ }, [USI_VER2] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+ },
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
- .min_mode = USI_V2_NONE,
- .max_mode = USI_V2_I2C,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct exynos_usi_variant exynos8895_usi_data = {
+ .ver = USI_VER1,
+ .sw_conf_mask = USI_V1_SW_CONF_MASK,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_UART_I2C1,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
@@ -87,6 +121,9 @@ static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
+ }, {
+ .compatible = "samsung,exynos8895-usi",
+ .data = &exynos8895_usi_data,
},
{ } /* sentinel */
};
@@ -109,14 +146,15 @@ static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
- val = exynos_usi_modes[mode].val;
+ val = exynos_usi_modes[usi->data->ver][mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
- dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+ dev_dbg(usi->dev, "protocol: %s\n",
+ exynos_usi_modes[usi->data->ver][usi->mode].name);
return 0;
}
@@ -168,10 +206,42 @@ static int exynos_usi_configure(struct exynos_usi *usi)
if (ret)
return ret;
- if (usi->data->ver == USI_VER2)
- return exynos_usi_enable(usi);
+ if (usi->data->ver == USI_VER1)
+ ret = clk_bulk_prepare_enable(usi->data->num_clks,
+ usi->clks);
+ else if (usi->data->ver == USI_VER2)
+ ret = exynos_usi_enable(usi);
- return 0;
+ return ret;
+}
+
+static void exynos_usi_unconfigure(void *data)
+{
+ struct exynos_usi *usi = data;
+ u32 val;
+ int ret;
+
+ if (usi->data->ver == USI_VER1) {
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+ return;
+ }
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return;
+
+ /* Make sure that we've stopped providing the clock to USI IP */
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKREQ_ON;
+ val |= USI_OPTION_CLKSTOP_ON;
+ writel(val, usi->regs + USI_OPTION);
+
+ /* Set USI block state to reset */
+ val = readl(usi->regs + USI_CON);
+ val |= USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
@@ -186,15 +256,11 @@ static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
return -EINVAL;
usi->mode = mode;
- usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ usi->sysreg = syscon_regmap_lookup_by_phandle_args(np, "samsung,sysreg",
+ 1, &usi->sw_conf);
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
- ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
- &usi->sw_conf);
- if (ret)
- return ret;
-
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
@@ -255,6 +321,10 @@ static int exynos_usi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, exynos_usi_unconfigure, usi);
+ if (ret)
+ return ret;
+
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}
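The mode selection above is now a two-level lookup, exynos_usi_modes[version][mode], whose value is then masked into the SW_CONF system register through regmap_update_bits(). A minimal standalone sketch of that lookup follows; the enum ordering mirrors the table in this patch, and the printf stands in for the register write, so treat it as an illustration rather than driver code.

/* Sketch of the (version, mode) table lookup added above. */
#include <stdio.h>

enum usi_ver  { USI_VER1, USI_VER2 };
enum usi_mode { USI_MODE_NONE, USI_MODE_UART, USI_MODE_SPI, USI_MODE_I2C,
		USI_MODE_I2C1, USI_MODE_I2C0_1, USI_MODE_UART_I2C1,
		USI_MODES_MAX };

static const unsigned int sw_conf_val[][USI_MODES_MAX] = {
	[USI_VER1] = {
		[USI_MODE_UART]      = 0x8,	/* USI_V1_SW_CONF_UART */
		[USI_MODE_UART_I2C1] = 0xa,	/* USI_V1_SW_CONF_UART_I2C1 */
	},
	[USI_VER2] = {
		[USI_MODE_UART]      = 0x1,	/* USI_V2_SW_CONF_UART */
	},
};

int main(void)
{
	/* e.g. an exynos8895 (USIv1) instance selecting the uart_i2c1 mode */
	printf("SW_CONF <- 0x%x\n", sw_conf_val[USI_VER1][USI_MODE_UART_I2C1]);
	return 0;
}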
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 30f230ed1769..4bad12a99542 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 7a2d50be6b4a..2ae5c3e1b07a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 6fedcd78cb45..58a2209795f7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index a08c377933c5..51b9d852bb6a 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <soc/tegra/common.h>
@@ -1181,7 +1182,7 @@ static int powergate_show(struct seq_file *s, void *data)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
- status ? "yes" : "no");
+ str_yes_no(status));
}
return 0;
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 4fb0f0a24828..704039eb3c07 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -105,6 +105,12 @@ err_unknown_variant:
return -ENODEV;
}
+static const struct regmap_config k3_chipinfo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
+ void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
- regmap = device_node_to_regmap(node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 193266f5e3f9..c36364522157 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -103,15 +103,19 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
- guard(msi_descs_lock)(dev);
+ msi_lock_descs(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0)
- return nvec;
+ if (nvec <= 0) {
+ ret = nvec;
+ goto unlock;
+ }
/* Use alloc ALL as it's unclear whether there are gaps in the indices */
ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+ msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 63a494d36a1f..7fb81bbaee60 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -383,7 +383,24 @@ config NULL_TTY
available or desired.
In order to use this driver, you should redirect the console to this
- TTY, or boot the kernel with console=ttynull.
+ TTY, boot the kernel with console=ttynull, or enable
+ NULL_TTY_DEFAULT_CONSOLE.
+
+ If unsure, say N.
+
+config NULL_TTY_DEFAULT_CONSOLE
+ bool "Support for console on ttynull"
+ depends on NULL_TTY=y && !VT_CONSOLE
+ help
+ Say Y here if you want the NULL TTY to be used as a /dev/console
+ device by default.
+
+ For example, it might be useful to prevent a VT-less kernel from
+ writing the system log to a random device connected to the serial
+ port.
+
+ Another console driver still might get preferred via the command
+ line, SPCR, or the device tree.
If unsure, say N.
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 92f7e752f862..d46650e578e5 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2426,10 +2426,10 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
/*
- * Disable the console device before suspending.
+ * Suspend the console device before suspending the port.
*/
if (uart_console(uport))
- console_stop(uport->cons);
+ console_suspend(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
@@ -2484,7 +2484,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_port_unlock_irq(uport);
}
if (console_suspend_enabled)
- console_start(uport->cons);
+ console_resume(uport->cons);
}
if (tty_port_suspended(port)) {
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 9b99d39f5bec..1b37449fbffc 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -147,7 +147,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -1806,19 +1806,15 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
-struct ufs_qcom_irq {
- unsigned int irq;
- unsigned int idx;
- struct ufs_hba *hba;
-};
-
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct ufs_qcom_irq *qi = data;
- struct ufs_hba *hba = qi->hba;
- struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
+ struct msi_desc *desc = data;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 id = desc->msi_index;
+ struct ufs_hw_queue *hwq = &hba->uhq[id];
- ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
+ ufshcd_mcq_write_cqis(hba, 0x1, id);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
@@ -1827,7 +1823,8 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_qcom_irq *qi;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
int nr_irqs, ret;
if (host->esi_enabled)
@@ -1838,47 +1835,47 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
- if (qi)
- return -ENOMEM;
-
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- goto cleanup;
+ return ret;
}
- for (int idx = 0; idx < nr_irqs; idx++) {
- qi[idx].irq = msi_get_virq(hba->dev, idx);
- qi[idx].idx = idx;
- qi[idx].hba = hba;
-
- ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", qi + idx);
+ msi_lock_descs(hba->dev);
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ ret = devm_request_irq(hba->dev, desc->irq,
+ ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", desc);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, qi[idx].irq, ret);
- qi[idx].irq = 0;
- goto cleanup;
+ __func__, desc->irq, ret);
+ failed_desc = desc;
+ break;
}
}
+ msi_unlock_descs(hba->dev);
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
+ if (ret) {
+ /* Rewind */
+ msi_lock_descs(hba->dev);
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(hba->dev, desc->irq, desc);
+ }
+ msi_unlock_descs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
+ } else {
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0)
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
}
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
- return 0;
-cleanup:
- for (int idx = 0; qi[idx].irq; idx++)
- devm_free_irq(hba->dev, qi[idx].irq, hba);
- platform_device_msi_free_irqs_all(hba->dev);
- devm_kfree(hba->dev, qi);
return ret;
}
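In the reworked ESI setup above, the cookie passed to devm_request_irq() is the MSI descriptor itself, so the handler recovers the owning device with msi_desc_to_dev(), the driver data with dev_get_drvdata(), and the hardware queue from desc->msi_index. A minimal sketch of that pattern follows; my_ctrl, my_queue, and my-vec are hypothetical names, and error handling is trimmed.

/* Sketch of using the MSI descriptor as the per-vector IRQ cookie. */
#include <linux/interrupt.h>
#include <linux/msi.h>

struct my_queue { /* per-vector state */ };
struct my_ctrl  { struct my_queue *queues; };

static irqreturn_t my_vec_handler(int irq, void *data)
{
	struct msi_desc *desc = data;			/* cookie == descriptor */
	struct device *dev = msi_desc_to_dev(desc);	/* owning device */
	struct my_ctrl *ctrl = dev_get_drvdata(dev);	/* driver private data */
	struct my_queue *q = &ctrl->queues[desc->msi_index];

	/* service queue 'q' here */
	(void)q;
	return IRQ_HANDLED;
}

static int my_request_vectors(struct device *dev)
{
	struct msi_desc *desc;
	int ret = 0;

	msi_lock_descs(dev);				/* stabilise the descriptor list */
	msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
		ret = devm_request_irq(dev, desc->irq, my_vec_handler,
				       IRQF_SHARED, "my-vec", desc);
		if (ret)
			break;				/* caller unwinds already-requested vectors */
	}
	msi_unlock_descs(dev);
	return ret;
}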