Diffstat (limited to 'drivers/devfreq')
-rw-r--r--  drivers/devfreq/Kconfig               |  19
-rw-r--r--  drivers/devfreq/Makefile              |   2
-rw-r--r--  drivers/devfreq/devfreq.c             | 104
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c   |   8
-rw-r--r--  drivers/devfreq/event/rockchip-dfi.c  |   7
-rw-r--r--  drivers/devfreq/exynos-bus.c          |  27
-rw-r--r--  drivers/devfreq/governor.h            |  27
-rw-r--r--  drivers/devfreq/governor_passive.c    | 391
-rw-r--r--  drivers/devfreq/imx-bus.c             |   3
-rw-r--r--  drivers/devfreq/mtk-cci-devfreq.c     | 444
-rw-r--r--  drivers/devfreq/rk3399_dmc.c          | 312
-rw-r--r--  drivers/devfreq/sun8i-a33-mbus.c      | 511
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c     |  26
13 files changed, 1574 insertions, 307 deletions
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index e87d01c0b76a..9754d8b31621 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -120,6 +120,16 @@ config ARM_TEGRA_DEVFREQ
It reads ACTMON counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
+config ARM_MEDIATEK_CCI_DEVFREQ
+ tristate "MEDIATEK CCI DEVFREQ Driver"
+ depends on ARM_MEDIATEK_CPUFREQ || COMPILE_TEST
+ select DEVFREQ_GOV_PASSIVE
+ help
+ This adds a devfreq driver for the MediaTek Cache Coherent Interconnect
+ (CCI), which shares its regulators with the CPU cluster. The driver
+ tracks buck voltage changes through notifications and adjusts the CCI
+ frequency accordingly.
+
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
@@ -132,6 +142,15 @@ config ARM_RK3399_DMC_DEVFREQ
It sets the frequency for the memory controller and reads the usage counts
from hardware.
+config ARM_SUN8I_A33_MBUS_DEVFREQ
+ tristate "sun8i/sun50i MBUS DEVFREQ Driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for the MBUS controller in some
+ Allwinner sun8i (A33 through H3) and sun50i (A64 and H5) SoCs.
+
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index a16333ea7034..bf40d04928d0 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -11,7 +11,9 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
+obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
+obj-$(CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ) += sun8i-a33-mbus.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
# DEVFREQ Event Drivers
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7906220d025c..63347a5ae599 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -112,18 +112,18 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq)
}
/**
- * get_freq_range() - Get the current freq range
+ * devfreq_get_freq_range() - Get the current freq range
* @devfreq: the devfreq instance
* @min_freq: the min frequency
* @max_freq: the max frequency
*
* This takes into consideration all constraints.
*/
-static void get_freq_range(struct devfreq *devfreq,
- unsigned long *min_freq,
- unsigned long *max_freq)
+void devfreq_get_freq_range(struct devfreq *devfreq,
+ unsigned long *min_freq,
+ unsigned long *max_freq)
{
- unsigned long *freq_table = devfreq->profile->freq_table;
+ unsigned long *freq_table = devfreq->freq_table;
s32 qos_min_freq, qos_max_freq;
lockdep_assert_held(&devfreq->lock);
@@ -133,11 +133,11 @@ static void get_freq_range(struct devfreq *devfreq,
* The devfreq drivers can initialize this in either ascending or
* descending order and devfreq core supports both.
*/
- if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+ if (freq_table[0] < freq_table[devfreq->max_state - 1]) {
*min_freq = freq_table[0];
- *max_freq = freq_table[devfreq->profile->max_state - 1];
+ *max_freq = freq_table[devfreq->max_state - 1];
} else {
- *min_freq = freq_table[devfreq->profile->max_state - 1];
+ *min_freq = freq_table[devfreq->max_state - 1];
*max_freq = freq_table[0];
}
@@ -158,6 +158,7 @@ static void get_freq_range(struct devfreq *devfreq,
if (*min_freq > *max_freq)
*min_freq = *max_freq;
}
+EXPORT_SYMBOL(devfreq_get_freq_range);
/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
@@ -168,8 +169,8 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
int lev;
- for (lev = 0; lev < devfreq->profile->max_state; lev++)
- if (freq == devfreq->profile->freq_table[lev])
+ for (lev = 0; lev < devfreq->max_state; lev++)
+ if (freq == devfreq->freq_table[lev])
return lev;
return -EINVAL;
@@ -177,7 +178,6 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
static int set_freq_table(struct devfreq *devfreq)
{
- struct devfreq_dev_profile *profile = devfreq->profile;
struct dev_pm_opp *opp;
unsigned long freq;
int i, count;
@@ -187,25 +187,22 @@ static int set_freq_table(struct devfreq *devfreq)
if (count <= 0)
return -EINVAL;
- profile->max_state = count;
- profile->freq_table = devm_kcalloc(devfreq->dev.parent,
- profile->max_state,
- sizeof(*profile->freq_table),
- GFP_KERNEL);
- if (!profile->freq_table) {
- profile->max_state = 0;
+ devfreq->max_state = count;
+ devfreq->freq_table = devm_kcalloc(devfreq->dev.parent,
+ devfreq->max_state,
+ sizeof(*devfreq->freq_table),
+ GFP_KERNEL);
+ if (!devfreq->freq_table)
return -ENOMEM;
- }
- for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+ for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
if (IS_ERR(opp)) {
- devm_kfree(devfreq->dev.parent, profile->freq_table);
- profile->max_state = 0;
+ devm_kfree(devfreq->dev.parent, devfreq->freq_table);
return PTR_ERR(opp);
}
dev_pm_opp_put(opp);
- profile->freq_table[i] = freq;
+ devfreq->freq_table[i] = freq;
}
return 0;
@@ -245,7 +242,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
if (lev != prev_lev) {
devfreq->stats.trans_table[
- (prev_lev * devfreq->profile->max_state) + lev]++;
+ (prev_lev * devfreq->max_state) + lev]++;
devfreq->stats.total_trans++;
}
@@ -382,8 +379,8 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
if (devfreq_update_status(devfreq, new_freq))
- dev_err(&devfreq->dev,
- "Couldn't update frequency transition information.\n");
+ dev_warn(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
devfreq->previous_freq = new_freq;
@@ -418,7 +415,7 @@ int devfreq_update_target(struct devfreq *devfreq, unsigned long freq)
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
return err;
- get_freq_range(devfreq, &min_freq, &max_freq);
+ devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
if (freq < min_freq) {
freq = min_freq;
@@ -699,6 +696,8 @@ static int qos_notifier_call(struct devfreq *devfreq)
/**
* qos_min_notifier_call() - Callback for QoS min_freq changes.
* @nb: Should be devfreq->nb_min
+ * @val: not used
+ * @ptr: not used
*/
static int qos_min_notifier_call(struct notifier_block *nb,
unsigned long val, void *ptr)
@@ -709,6 +708,8 @@ static int qos_min_notifier_call(struct notifier_block *nb,
/**
* qos_max_notifier_call() - Callback for QoS max_freq changes.
* @nb: Should be devfreq->nb_max
+ * @val: not used
+ * @ptr: not used
*/
static int qos_max_notifier_call(struct notifier_block *nb,
unsigned long val, void *ptr)
@@ -785,6 +786,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
+ unsigned long min_freq, max_freq;
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -833,6 +835,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (err < 0)
goto err_dev;
mutex_lock(&devfreq->lock);
+ } else {
+ devfreq->freq_table = devfreq->profile->freq_table;
+ devfreq->max_state = devfreq->profile->max_state;
}
devfreq->scaling_min_freq = find_available_min_freq(devfreq);
@@ -849,6 +854,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
+ devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
+
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
if (IS_ERR(devfreq->opp_table))
@@ -866,8 +873,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
- devfreq->profile->max_state,
- devfreq->profile->max_state),
+ devfreq->max_state,
+ devfreq->max_state),
GFP_KERNEL);
if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
@@ -876,7 +883,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
- devfreq->profile->max_state,
+ devfreq->max_state,
sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
if (!devfreq->stats.time_in_state) {
@@ -928,8 +935,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
NULL);
if (err) {
- dev_err(dev, "%s: Unable to start governor for the device\n",
- __func__);
+ dev_err_probe(dev, err,
+ "%s: Unable to start governor for the device\n",
+ __func__);
goto err_init;
}
create_sysfs_files(devfreq, devfreq->governor);
@@ -1587,7 +1595,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
unsigned long min_freq, max_freq;
mutex_lock(&df->lock);
- get_freq_range(df, &min_freq, &max_freq);
+ devfreq_get_freq_range(df, &min_freq, &max_freq);
mutex_unlock(&df->lock);
return sprintf(buf, "%lu\n", min_freq);
@@ -1641,7 +1649,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
unsigned long min_freq, max_freq;
mutex_lock(&df->lock);
- get_freq_range(df, &min_freq, &max_freq);
+ devfreq_get_freq_range(df, &min_freq, &max_freq);
mutex_unlock(&df->lock);
return sprintf(buf, "%lu\n", max_freq);
@@ -1661,9 +1669,9 @@ static ssize_t available_frequencies_show(struct device *d,
mutex_lock(&df->lock);
- for (i = 0; i < df->profile->max_state; i++)
+ for (i = 0; i < df->max_state; i++)
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
- "%lu ", df->profile->freq_table[i]);
+ "%lu ", df->freq_table[i]);
mutex_unlock(&df->lock);
/* Truncate the trailing space */
@@ -1686,7 +1694,7 @@ static ssize_t trans_stat_show(struct device *dev,
if (!df->profile)
return -EINVAL;
- max_state = df->profile->max_state;
+ max_state = df->max_state;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
@@ -1703,19 +1711,17 @@ static ssize_t trans_stat_show(struct device *dev,
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
len += sprintf(buf + len, "%10lu",
- df->profile->freq_table[i]);
+ df->freq_table[i]);
len += sprintf(buf + len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
- if (df->profile->freq_table[i]
- == df->previous_freq) {
+ if (df->freq_table[i] == df->previous_freq)
len += sprintf(buf + len, "*");
- } else {
+ else
len += sprintf(buf + len, " ");
- }
- len += sprintf(buf + len, "%10lu:",
- df->profile->freq_table[i]);
+
+ len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
@@ -1739,7 +1745,7 @@ static ssize_t trans_stat_store(struct device *dev,
if (!df->profile)
return -EINVAL;
- if (df->profile->max_state == 0)
+ if (df->max_state == 0)
return count;
err = kstrtoint(buf, 10, &value);
@@ -1747,11 +1753,11 @@ static ssize_t trans_stat_store(struct device *dev,
return -EINVAL;
mutex_lock(&df->lock);
- memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ memset(df->stats.time_in_state, 0, (df->max_state *
sizeof(*df->stats.time_in_state)));
memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
- df->profile->max_state,
- df->profile->max_state));
+ df->max_state,
+ df->max_state));
df->stats.total_trans = 0;
df->stats.last_update = get_jiffies_64();
mutex_unlock(&df->lock);
@@ -1955,7 +1961,7 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
mutex_lock(&devfreq->lock);
cur_freq = devfreq->previous_freq;
- get_freq_range(devfreq, &min_freq, &max_freq);
+ devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
timer = devfreq->profile->timer;
if (IS_SUPPORTED_ATTR(devfreq->governor->attrs, POLLING_INTERVAL))
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b849d781116..a443e7c42daf 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -519,15 +519,19 @@ static int of_get_devfreq_events(struct device_node *np,
count = of_get_child_count(events_np);
desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
- if (!desc)
+ if (!desc) {
+ of_node_put(events_np);
return -ENOMEM;
+ }
info->num_events = count;
of_id = of_match_device(exynos_ppmu_id_match, dev);
if (of_id)
info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
- else
+ else {
+ of_node_put(events_np);
return -EINVAL;
+ }
j = 0;
for_each_child_of_node(events_np, node) {
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 9a88faaf8b27..39ac069cabc7 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
data->clk = devm_clk_get(dev, "pclk_ddr_mon");
- if (IS_ERR(data->clk)) {
- dev_err(dev, "Cannot get the clk dmc_clk\n");
- return PTR_ERR(data->clk);
- }
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index e689101abc93..027e8f336acc 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -33,7 +33,7 @@ struct exynos_bus {
unsigned long curr_freq;
- struct opp_table *opp_table;
+ int opp_token;
struct clk *clk;
unsigned int ratio;
};
@@ -161,8 +161,7 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -179,18 +178,16 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
- struct opp_table *opp_table;
- const char *vdd = "vdd";
+ const char *supplies[] = { "vdd", NULL };
int i, ret, count, size;
- opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = dev_pm_opp_set_regulators(dev, supplies);
+ if (ret < 0) {
dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
- bus->opp_table = opp_table;
+ bus->opp_token = ret;
/*
* Get the devfreq-event devices to get the current utilization of
@@ -236,8 +233,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
return 0;
err_regulator:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
@@ -447,9 +443,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
}
- max_state = bus->devfreq->profile->max_state;
- min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
- max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+ max_state = bus->devfreq->max_state;
+ min_freq = (bus->devfreq->freq_table[0] / 1000);
+ max_freq = (bus->devfreq->freq_table[max_state - 1] / 1000);
pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
dev_name(dev), min_freq, max_freq);
@@ -459,8 +455,7 @@ err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index 002a7d67e39d..0adfebc0467a 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -48,6 +48,31 @@
#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
/**
+ * struct devfreq_cpu_data - Hold the per-cpu data
+ * @node: list node
+ * @dev: reference to cpu device.
+ * @first_cpu: the first cpu of a cpufreq policy (from related_cpus).
+ * @opp_table: reference to cpu opp table.
+ * @cur_freq: the current frequency of the cpu.
+ * @min_freq: the min frequency of the cpu.
+ * @max_freq: the max frequency of the cpu.
+ *
+ * This structure holds the per-cpu data required by the passive governor;
+ * one instance is populated automatically for each cpufreq policy.
+ */
+struct devfreq_cpu_data {
+ struct list_head node;
+
+ struct device *dev;
+ unsigned int first_cpu;
+
+ struct opp_table *opp_table;
+ unsigned int cur_freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+};
+
+/**
* struct devfreq_governor - Devfreq policy governor
* @node: list node - contains registered devfreq governors
* @name: Governor's name
@@ -89,6 +114,8 @@ int devm_devfreq_add_governor(struct device *dev,
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
+void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
+ unsigned long *max_freq);
static inline int devfreq_update_stats(struct devfreq *df)
{
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index fc09324a03e0..953cf9a1e9f7 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -8,20 +8,172 @@
*/
#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/units.h>
#include "governor.h"
-static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+static struct devfreq_cpu_data *
+get_parent_cpu_data(struct devfreq_passive_data *p_data,
+ struct cpufreq_policy *policy)
+{
+ struct devfreq_cpu_data *parent_cpu_data;
+
+ if (!p_data || !policy)
+ return NULL;
+
+ list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
+ if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
+ return parent_cpu_data;
+
+ return NULL;
+}
+
+static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
+{
+ struct devfreq_cpu_data *parent_cpu_data, *tmp;
+
+ list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
+ list_del(&parent_cpu_data->node);
+
+ if (parent_cpu_data->opp_table)
+ dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
+
+ kfree(parent_cpu_data);
+ }
+}
+
+static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
+ struct opp_table *p_opp_table,
+ struct opp_table *opp_table,
+ unsigned long *freq)
+{
+ struct dev_pm_opp *opp = NULL, *p_opp = NULL;
+ unsigned long target_freq;
+
+ if (!p_dev || !p_opp_table || !opp_table || !freq)
+ return 0;
+
+ p_opp = devfreq_recommended_opp(p_dev, freq, 0);
+ if (IS_ERR(p_opp))
+ return 0;
+
+ opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
+ dev_pm_opp_put(p_opp);
+
+ if (IS_ERR(opp))
+ return 0;
+
+ target_freq = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return target_freq;
+}
+
+static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
+ unsigned long *target_freq)
+{
+ struct devfreq_passive_data *p_data =
+ (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq_cpu_data *parent_cpu_data;
+ struct cpufreq_policy *policy;
+ unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
+ unsigned long dev_min, dev_max;
+ unsigned long freq = 0;
+ int ret = 0;
+
+ for_each_online_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ ret = -EINVAL;
+ continue;
+ }
+
+ parent_cpu_data = get_parent_cpu_data(p_data, policy);
+ if (!parent_cpu_data) {
+ cpufreq_cpu_put(policy);
+ continue;
+ }
+
+ /* Get target freq via required opps */
+ cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
+ freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
+ parent_cpu_data->opp_table,
+ devfreq->opp_table, &cpu_cur);
+ if (freq) {
+ *target_freq = max(freq, *target_freq);
+ cpufreq_cpu_put(policy);
+ continue;
+ }
+
+ /* Fall back to interpolation if required-opps are not available */
+ devfreq_get_freq_range(devfreq, &dev_min, &dev_max);
+
+ cpu_min = parent_cpu_data->min_freq;
+ cpu_max = parent_cpu_data->max_freq;
+ cpu_cur = parent_cpu_data->cur_freq;
+
+ cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
+ freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
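+ /*
+ * Illustrative example with hypothetical numbers: a CPU at 1500000 kHz
+ * within a 600000..2000000 kHz policy range is at 64%, so a device
+ * spanning 200..800 MHz would be asked for
+ * 200 + (800 - 200) * 64 / 100 = 584 MHz.
+ */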
+
+ *target_freq = max(freq, *target_freq);
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret;
+}
+
+static int get_target_freq_with_devfreq(struct devfreq *devfreq,
unsigned long *freq)
{
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
unsigned long child_freq = ULONG_MAX;
- struct dev_pm_opp *opp, *p_opp;
int i, count;
+ /* Get target freq via required opps */
+ child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
+ parent_devfreq->opp_table,
+ devfreq->opp_table, freq);
+ if (child_freq)
+ goto out;
+
+ /* Fall back to matching freq_table indices if required-opps are not available */
+ for (i = 0; i < parent_devfreq->max_state; i++)
+ if (parent_devfreq->freq_table[i] == *freq)
+ break;
+
+ if (i == parent_devfreq->max_state)
+ return -EINVAL;
+
+ if (i < devfreq->max_state) {
+ child_freq = devfreq->freq_table[i];
+ } else {
+ count = devfreq->max_state;
+ child_freq = devfreq->freq_table[count - 1];
+ }
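+ /*
+ * For instance (hypothetical tables): a parent frequency found at index 2
+ * of a 5-entry parent table selects the child's index-2 entry; if the
+ * child table only has 3 entries, parent indices 3 and 4 fall back to the
+ * child's last entry.
+ */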
+
+out:
+ *freq = child_freq;
+
+ return 0;
+}
+
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+ unsigned long *freq)
+{
+ struct devfreq_passive_data *p_data =
+ (struct devfreq_passive_data *)devfreq->data;
+ int ret;
+
+ if (!p_data)
+ return -EINVAL;
+
/*
* If the devfreq device with passive governor has the specific method
* to determine the next frequency, should use the get_target_freq()
@@ -30,75 +182,158 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
if (p_data->get_target_freq)
return p_data->get_target_freq(devfreq, freq);
- /*
- * If the parent and passive devfreq device uses the OPP table,
- * get the next frequency by using the OPP table.
- */
+ switch (p_data->parent_type) {
+ case DEVFREQ_PARENT_DEV:
+ ret = get_target_freq_with_devfreq(devfreq, freq);
+ break;
+ case CPUFREQ_PARENT_DEV:
+ ret = get_target_freq_with_cpufreq(devfreq, freq);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(&devfreq->dev, "Invalid parent type\n");
+ break;
+ }
- /*
- * - parent devfreq device uses the governors except for passive.
- * - passive devfreq device uses the passive governor.
- *
- * Each devfreq has the OPP table. After deciding the new frequency
- * from the governor of parent devfreq device, the passive governor
- * need to get the index of new frequency on OPP table of parent
- * device. And then the index is used for getting the suitable
- * new frequency for passive devfreq device.
- */
- if (!devfreq->profile || !devfreq->profile->freq_table
- || devfreq->profile->max_state <= 0)
- return -EINVAL;
+ return ret;
+}
- /*
- * The passive governor have to get the correct frequency from OPP
- * list of parent device. Because in this case, *freq is temporary
- * value which is decided by ondemand governor.
- */
- if (devfreq->opp_table && parent_devfreq->opp_table) {
- p_opp = devfreq_recommended_opp(parent_devfreq->dev.parent,
- freq, 0);
- if (IS_ERR(p_opp))
- return PTR_ERR(p_opp);
+static int cpufreq_passive_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct devfreq_passive_data *p_data =
+ container_of(nb, struct devfreq_passive_data, nb);
+ struct devfreq *devfreq = (struct devfreq *)p_data->this;
+ struct devfreq_cpu_data *parent_cpu_data;
+ struct cpufreq_freqs *freqs = ptr;
+ unsigned int cur_freq;
+ int ret;
- opp = dev_pm_opp_xlate_required_opp(parent_devfreq->opp_table,
- devfreq->opp_table, p_opp);
- dev_pm_opp_put(p_opp);
+ if (event != CPUFREQ_POSTCHANGE || !freqs)
+ return 0;
- if (IS_ERR(opp))
- goto no_required_opp;
+ parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
+ if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
+ return 0;
- *freq = dev_pm_opp_get_freq(opp);
- dev_pm_opp_put(opp);
+ cur_freq = parent_cpu_data->cur_freq;
+ parent_cpu_data->cur_freq = freqs->new;
- return 0;
+ mutex_lock(&devfreq->lock);
+ ret = devfreq_update_target(devfreq, freqs->new);
+ mutex_unlock(&devfreq->lock);
+ if (ret) {
+ parent_cpu_data->cur_freq = cur_freq;
+ dev_err(&devfreq->dev, "failed to update the frequency.\n");
+ return ret;
}
-no_required_opp:
- /*
- * Get the OPP table's index of decided frequency by governor
- * of parent device.
- */
- for (i = 0; i < parent_devfreq->profile->max_state; i++)
- if (parent_devfreq->profile->freq_table[i] == *freq)
- break;
+ return 0;
+}
- if (i == parent_devfreq->profile->max_state)
- return -EINVAL;
+static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ int ret;
- /* Get the suitable frequency by using index of parent device. */
- if (i < devfreq->profile->max_state) {
- child_freq = devfreq->profile->freq_table[i];
- } else {
- count = devfreq->profile->max_state;
- child_freq = devfreq->profile->freq_table[count - 1];
+ if (p_data->nb.notifier_call) {
+ ret = cpufreq_unregister_notifier(&p_data->nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret < 0)
+ return ret;
}
- /* Return the suitable frequency for passive device. */
- *freq = child_freq;
+ delete_parent_cpu_data(p_data);
return 0;
}
+static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct device *dev = devfreq->dev.parent;
+ struct opp_table *opp_table = NULL;
+ struct devfreq_cpu_data *parent_cpu_data;
+ struct cpufreq_policy *policy;
+ struct device *cpu_dev;
+ unsigned int cpu;
+ int ret;
+
+ p_data->cpu_data_list
+ = (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);
+
+ p_data->nb.notifier_call = cpufreq_passive_notifier_call;
+ ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev, "failed to register cpufreq notifier\n");
+ p_data->nb.notifier_call = NULL;
+ goto err;
+ }
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+ parent_cpu_data = get_parent_cpu_data(p_data, policy);
+ if (parent_cpu_data) {
+ cpufreq_cpu_put(policy);
+ continue;
+ }
+
+ parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
+ GFP_KERNEL);
+ if (!parent_cpu_data) {
+ ret = -ENOMEM;
+ goto err_put_policy;
+ }
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ dev_err(dev, "failed to get cpu device\n");
+ ret = -ENODEV;
+ goto err_free_cpu_data;
+ }
+
+ opp_table = dev_pm_opp_get_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
+ ret = PTR_ERR(opp_table);
+ goto err_free_cpu_data;
+ }
+
+ parent_cpu_data->dev = cpu_dev;
+ parent_cpu_data->opp_table = opp_table;
+ parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
+ parent_cpu_data->cur_freq = policy->cur;
+ parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
+ parent_cpu_data->max_freq = policy->cpuinfo.max_freq;
+
+ list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
+ cpufreq_cpu_put(policy);
+ }
+
+ mutex_lock(&devfreq->lock);
+ ret = devfreq_update_target(devfreq, 0L);
+ mutex_unlock(&devfreq->lock);
+ if (ret)
+ dev_err(dev, "failed to update the frequency\n");
+
+ return ret;
+
+err_free_cpu_data:
+ kfree(parent_cpu_data);
+err_put_policy:
+ cpufreq_cpu_put(policy);
+err:
+
+ return ret;
+}
+
static int devfreq_passive_notifier_call(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -131,30 +366,54 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int devfreq_passive_event_handler(struct devfreq *devfreq,
- unsigned int event, void *data)
+static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent = (struct devfreq *)p_data->parent;
+ struct notifier_block *nb = &p_data->nb;
+
+ return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
+}
+
+static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent = (struct devfreq *)p_data->parent;
struct notifier_block *nb = &p_data->nb;
- int ret = 0;
if (!parent)
return -EPROBE_DEFER;
+ nb->notifier_call = devfreq_passive_notifier_call;
+ return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
+}
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ int ret = 0;
+
+ if (!p_data)
+ return -EINVAL;
+
+ p_data->this = devfreq;
+
switch (event) {
case DEVFREQ_GOV_START:
- if (!p_data->this)
- p_data->this = devfreq;
-
- nb->notifier_call = devfreq_passive_notifier_call;
- ret = devfreq_register_notifier(parent, nb,
- DEVFREQ_TRANSITION_NOTIFIER);
+ if (p_data->parent_type == DEVFREQ_PARENT_DEV)
+ ret = devfreq_passive_register_notifier(devfreq);
+ else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
+ ret = cpufreq_passive_register_notifier(devfreq);
break;
case DEVFREQ_GOV_STOP:
- WARN_ON(devfreq_unregister_notifier(parent, nb,
- DEVFREQ_TRANSITION_NOTIFIER));
+ if (p_data->parent_type == DEVFREQ_PARENT_DEV)
+ WARN_ON(devfreq_passive_unregister_notifier(devfreq));
+ else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
+ WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
break;
default:
break;
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index f3f6e25053ed..a727067980fb 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -59,7 +59,7 @@ static int imx_bus_init_icc(struct device *dev)
struct imx_bus *priv = dev_get_drvdata(dev);
const char *icc_driver_name;
- if (!of_get_property(dev->of_node, "#interconnect-cells", 0))
+ if (!of_get_property(dev->of_node, "#interconnect-cells", NULL))
return 0;
if (!IS_ENABLED(CONFIG_INTERCONNECT_IMX)) {
dev_warn(dev, "imx interconnect drivers disabled\n");
@@ -145,6 +145,7 @@ static const struct of_device_id imx_bus_of_match[] = {
{ .compatible = "fsl,imx8mq-noc", .data = "imx8mq-interconnect", },
{ .compatible = "fsl,imx8mm-noc", .data = "imx8mm-interconnect", },
{ .compatible = "fsl,imx8mn-noc", .data = "imx8mn-interconnect", },
+ { .compatible = "fsl,imx8mp-noc", .data = "imx8mp-interconnect", },
{ .compatible = "fsl,imx8m-noc", },
{ .compatible = "fsl,imx8m-nic", },
{ /* sentinel */ },
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
new file mode 100644
index 000000000000..e5458ada5197
--- /dev/null
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+
+struct mtk_ccifreq_platform_data {
+ int min_volt_shift;
+ int max_volt_shift;
+ int proc_max_volt;
+ int sram_min_volt;
+ int sram_max_volt;
+};
+
+struct mtk_ccifreq_drv {
+ struct device *dev;
+ struct devfreq *devfreq;
+ struct regulator *proc_reg;
+ struct regulator *sram_reg;
+ struct clk *cci_clk;
+ struct clk *inter_clk;
+ int inter_voltage;
+ unsigned long pre_freq;
+ /* Avoid race condition for regulators between notify and policy */
+ struct mutex reg_lock;
+ struct notifier_block opp_nb;
+ const struct mtk_ccifreq_platform_data *soc_data;
+ int vtrack_max;
+};
+
+static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
+{
+ const struct mtk_ccifreq_platform_data *soc_data = drv->soc_data;
+ struct device *dev = drv->dev;
+ int pre_voltage, pre_vsram, new_vsram, vsram, voltage, ret;
+ int retry_max = drv->vtrack_max;
+
+ if (!drv->sram_reg) {
+ ret = regulator_set_voltage(drv->proc_reg, new_voltage,
+ drv->soc_data->proc_max_volt);
+ return ret;
+ }
+
+ pre_voltage = regulator_get_voltage(drv->proc_reg);
+ if (pre_voltage < 0) {
+ dev_err(dev, "invalid vproc value: %d\n", pre_voltage);
+ return pre_voltage;
+ }
+
+ pre_vsram = regulator_get_voltage(drv->sram_reg);
+ if (pre_vsram < 0) {
+ dev_err(dev, "invalid vsram value: %d\n", pre_vsram);
+ return pre_vsram;
+ }
+
+ new_vsram = clamp(new_voltage + soc_data->min_volt_shift,
+ soc_data->sram_min_volt, soc_data->sram_max_volt);
+
+ do {
+ if (pre_voltage <= new_voltage) {
+ vsram = clamp(pre_voltage + soc_data->max_volt_shift,
+ soc_data->sram_min_volt, new_vsram);
+ ret = regulator_set_voltage(drv->sram_reg, vsram,
+ soc_data->sram_max_volt);
+ if (ret)
+ return ret;
+
+ if (vsram == soc_data->sram_max_volt ||
+ new_vsram == soc_data->sram_min_volt)
+ voltage = new_voltage;
+ else
+ voltage = vsram - soc_data->min_volt_shift;
+
+ ret = regulator_set_voltage(drv->proc_reg, voltage,
+ soc_data->proc_max_volt);
+ if (ret) {
+ regulator_set_voltage(drv->sram_reg, pre_vsram,
+ soc_data->sram_max_volt);
+ return ret;
+ }
+ } else if (pre_voltage > new_voltage) {
+ voltage = max(new_voltage,
+ pre_vsram - soc_data->max_volt_shift);
+ ret = regulator_set_voltage(drv->proc_reg, voltage,
+ soc_data->proc_max_volt);
+ if (ret)
+ return ret;
+
+ if (voltage == new_voltage)
+ vsram = new_vsram;
+ else
+ vsram = max(new_vsram,
+ voltage + soc_data->min_volt_shift);
+
+ ret = regulator_set_voltage(drv->sram_reg, vsram,
+ soc_data->sram_max_volt);
+ if (ret) {
+ regulator_set_voltage(drv->proc_reg, pre_voltage,
+ soc_data->proc_max_volt);
+ return ret;
+ }
+ }
+
+ pre_voltage = voltage;
+ pre_vsram = vsram;
+
+ if (--retry_max < 0) {
+ dev_err(dev,
+ "over loop count, failed to set voltage\n");
+ return -EINVAL;
+ }
+ } while (voltage != new_voltage || vsram != new_vsram);
+
+ return 0;
+}
+
+static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev);
+ struct clk *cci_pll;
+ struct dev_pm_opp *opp;
+ unsigned long opp_rate;
+ int voltage, pre_voltage, inter_voltage, target_voltage, ret;
+
+ if (!drv)
+ return -EINVAL;
+
+ cci_pll = clk_get_parent(drv->cci_clk);
+
+ if (drv->pre_freq == *freq)
+ return 0;
+
+ inter_voltage = drv->inter_voltage;
+
+ opp_rate = *freq;
+ opp = devfreq_recommended_opp(dev, &opp_rate, 1);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for freq: %ld\n", opp_rate);
+ return PTR_ERR(opp);
+ }
+
+ mutex_lock(&drv->reg_lock);
+
+ voltage = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ pre_voltage = regulator_get_voltage(drv->proc_reg);
+ if (pre_voltage < 0) {
+ dev_err(dev, "invalid vproc value: %d\n", pre_voltage);
+ ret = pre_voltage;
+ goto out_unlock;
+ }
+
+ /* scale up: set voltage first then freq. */
+ target_voltage = max(inter_voltage, voltage);
+ if (pre_voltage <= target_voltage) {
+ ret = mtk_ccifreq_set_voltage(drv, target_voltage);
+ if (ret) {
+ dev_err(dev, "failed to scale up voltage\n");
+ goto out_restore_voltage;
+ }
+ }
+
+ /* switch the cci clock to intermediate clock source. */
+ ret = clk_set_parent(drv->cci_clk, drv->inter_clk);
+ if (ret) {
+ dev_err(dev, "failed to re-parent cci clock\n");
+ goto out_restore_voltage;
+ }
+
+ /* set the original clock to target rate. */
+ ret = clk_set_rate(cci_pll, *freq);
+ if (ret) {
+ dev_err(dev, "failed to set cci pll rate: %d\n", ret);
+ clk_set_parent(drv->cci_clk, cci_pll);
+ goto out_restore_voltage;
+ }
+
+ /* switch the cci clock back to the original clock source. */
+ ret = clk_set_parent(drv->cci_clk, cci_pll);
+ if (ret) {
+ dev_err(dev, "failed to re-parent cci clock\n");
+ mtk_ccifreq_set_voltage(drv, inter_voltage);
+ goto out_unlock;
+ }
+
+ /*
+ * If the new voltage is lower than the intermediate voltage or the
+ * original voltage, scale down to the new voltage.
+ */
+ if (voltage < inter_voltage || voltage < pre_voltage) {
+ ret = mtk_ccifreq_set_voltage(drv, voltage);
+ if (ret) {
+ dev_err(dev, "failed to scale down voltage\n");
+ goto out_unlock;
+ }
+ }
+
+ drv->pre_freq = *freq;
+ mutex_unlock(&drv->reg_lock);
+
+ return 0;
+
+out_restore_voltage:
+ mtk_ccifreq_set_voltage(drv, pre_voltage);
+
+out_unlock:
+ mutex_unlock(&drv->reg_lock);
+ return ret;
+}
+
+static int mtk_ccifreq_opp_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct mtk_ccifreq_drv *drv;
+ unsigned long freq, volt;
+
+ drv = container_of(nb, struct mtk_ccifreq_drv, opp_nb);
+
+ if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+ mutex_lock(&drv->reg_lock);
+ /* current opp item is changed */
+ if (freq == drv->pre_freq) {
+ volt = dev_pm_opp_get_voltage(opp);
+ mtk_ccifreq_set_voltage(drv, volt);
+ }
+ mutex_unlock(&drv->reg_lock);
+ }
+
+ return 0;
+}
+
+static struct devfreq_dev_profile mtk_ccifreq_profile = {
+ .target = mtk_ccifreq_target,
+};
+
+static int mtk_ccifreq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ccifreq_drv *drv;
+ struct devfreq_passive_data *passive_data;
+ struct dev_pm_opp *opp;
+ unsigned long rate, opp_volt;
+ int ret;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->dev = dev;
+ drv->soc_data = (const struct mtk_ccifreq_platform_data *)
+ of_device_get_match_data(&pdev->dev);
+ mutex_init(&drv->reg_lock);
+ platform_set_drvdata(pdev, drv);
+
+ drv->cci_clk = devm_clk_get(dev, "cci");
+ if (IS_ERR(drv->cci_clk)) {
+ ret = PTR_ERR(drv->cci_clk);
+ return dev_err_probe(dev, ret, "failed to get cci clk\n");
+ }
+
+ drv->inter_clk = devm_clk_get(dev, "intermediate");
+ if (IS_ERR(drv->inter_clk)) {
+ ret = PTR_ERR(drv->inter_clk);
+ return dev_err_probe(dev, ret,
+ "failed to get intermediate clk\n");
+ }
+
+ drv->proc_reg = devm_regulator_get_optional(dev, "proc");
+ if (IS_ERR(drv->proc_reg)) {
+ ret = PTR_ERR(drv->proc_reg);
+ return dev_err_probe(dev, ret,
+ "failed to get proc regulator\n");
+ }
+
+ ret = regulator_enable(drv->proc_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable proc regulator\n");
+ return ret;
+ }
+
+ drv->sram_reg = devm_regulator_get_optional(dev, "sram");
+ if (IS_ERR(drv->sram_reg)) {
+ ret = PTR_ERR(drv->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_free_resources;
+
+ drv->sram_reg = NULL;
+ } else {
+ ret = regulator_enable(drv->sram_reg);
+ if (ret) {
+ dev_err(dev, "failed to enable sram regulator\n");
+ goto out_free_resources;
+ }
+ }
+
+ /*
+ * We assume the minimum voltage is 0 and track the target voltage in
+ * steps of min_volt_shift on each iteration.
+ * The retry_max is three times the expected iteration count.
+ */
+ drv->vtrack_max = 3 * DIV_ROUND_UP(max(drv->soc_data->sram_max_volt,
+ drv->soc_data->proc_max_volt),
+ drv->soc_data->min_volt_shift);
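+ /*
+ * Illustrative example (values from the mt8186 platform data below):
+ * DIV_ROUND_UP(max(1118750, 1118750), 100000) = 12 steps, so
+ * vtrack_max = 36 retries.
+ */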
+
+ ret = clk_prepare_enable(drv->cci_clk);
+ if (ret)
+ goto out_free_resources;
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret) {
+ dev_err(dev, "failed to add opp table: %d\n", ret);
+ goto out_disable_cci_clk;
+ }
+
+ rate = clk_get_rate(drv->inter_clk);
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "failed to get intermediate opp: %d\n", ret);
+ goto out_remove_opp_table;
+ }
+ drv->inter_voltage = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ rate = U32_MAX;
+ opp = dev_pm_opp_find_freq_floor(drv->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to get opp\n");
+ ret = PTR_ERR(opp);
+ goto out_remove_opp_table;
+ }
+
+ opp_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+ ret = mtk_ccifreq_set_voltage(drv, opp_volt);
+ if (ret) {
+ dev_err(dev, "failed to scale to highest voltage %lu in proc_reg\n",
+ opp_volt);
+ goto out_remove_opp_table;
+ }
+
+ passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+ if (!passive_data) {
+ ret = -ENOMEM;
+ goto out_remove_opp_table;
+ }
+
+ passive_data->parent_type = CPUFREQ_PARENT_DEV;
+ drv->devfreq = devm_devfreq_add_device(dev, &mtk_ccifreq_profile,
+ DEVFREQ_GOV_PASSIVE,
+ passive_data);
+ if (IS_ERR(drv->devfreq)) {
+ ret = -EPROBE_DEFER;
+ dev_err(dev, "failed to add devfreq device: %ld\n",
+ PTR_ERR(drv->devfreq));
+ goto out_remove_opp_table;
+ }
+
+ drv->opp_nb.notifier_call = mtk_ccifreq_opp_notifier;
+ ret = dev_pm_opp_register_notifier(dev, &drv->opp_nb);
+ if (ret) {
+ dev_err(dev, "failed to register opp notifier: %d\n", ret);
+ goto out_remove_opp_table;
+ }
+ return 0;
+
+out_remove_opp_table:
+ dev_pm_opp_of_remove_table(dev);
+
+out_disable_cci_clk:
+ clk_disable_unprepare(drv->cci_clk);
+
+out_free_resources:
+ if (regulator_is_enabled(drv->proc_reg))
+ regulator_disable(drv->proc_reg);
+ if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ regulator_disable(drv->sram_reg);
+
+ return ret;
+}
+
+static int mtk_ccifreq_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ccifreq_drv *drv;
+
+ drv = platform_get_drvdata(pdev);
+
+ dev_pm_opp_unregister_notifier(dev, &drv->opp_nb);
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(drv->cci_clk);
+ regulator_disable(drv->proc_reg);
+ if (drv->sram_reg)
+ regulator_disable(drv->sram_reg);
+
+ return 0;
+}
+
+static const struct mtk_ccifreq_platform_data mt8183_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+};
+
+static const struct mtk_ccifreq_platform_data mt8186_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 250000,
+ .proc_max_volt = 1118750,
+ .sram_min_volt = 850000,
+ .sram_max_volt = 1118750,
+};
+
+static const struct of_device_id mtk_ccifreq_machines[] = {
+ { .compatible = "mediatek,mt8183-cci", .data = &mt8183_platform_data },
+ { .compatible = "mediatek,mt8186-cci", .data = &mt8186_platform_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
+
+static struct platform_driver mtk_ccifreq_platdrv = {
+ .probe = mtk_ccifreq_probe,
+ .remove = mtk_ccifreq_remove,
+ .driver = {
+ .name = "mtk-ccifreq",
+ .of_match_table = mtk_ccifreq_machines,
+ },
+};
+module_platform_driver(mtk_ccifreq_platdrv);
+
+MODULE_DESCRIPTION("MediaTek CCI devfreq driver");
+MODULE_AUTHOR("Jia-Wei Chang <jia-wei.chang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 293857ebfd75..daff40702615 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -5,6 +5,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
@@ -20,55 +21,49 @@
#include <linux/rwsem.h>
#include <linux/suspend.h>
+#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>
-struct dram_timing {
- unsigned int ddr3_speed_bin;
- unsigned int pd_idle;
- unsigned int sr_idle;
- unsigned int sr_mc_gate_idle;
- unsigned int srpd_lite_idle;
- unsigned int standby_idle;
- unsigned int auto_pd_dis_freq;
- unsigned int dram_dll_dis_freq;
- unsigned int phy_dll_dis_freq;
- unsigned int ddr3_odt_dis_freq;
- unsigned int ddr3_drv;
- unsigned int ddr3_odt;
- unsigned int phy_ddr3_ca_drv;
- unsigned int phy_ddr3_dq_drv;
- unsigned int phy_ddr3_odt;
- unsigned int lpddr3_odt_dis_freq;
- unsigned int lpddr3_drv;
- unsigned int lpddr3_odt;
- unsigned int phy_lpddr3_ca_drv;
- unsigned int phy_lpddr3_dq_drv;
- unsigned int phy_lpddr3_odt;
- unsigned int lpddr4_odt_dis_freq;
- unsigned int lpddr4_drv;
- unsigned int lpddr4_dq_odt;
- unsigned int lpddr4_ca_odt;
- unsigned int phy_lpddr4_ca_drv;
- unsigned int phy_lpddr4_ck_cs_drv;
- unsigned int phy_lpddr4_dq_drv;
- unsigned int phy_lpddr4_odt;
-};
+#define NS_TO_CYCLE(NS, MHz) (((NS) * (MHz)) / NSEC_PER_USEC)
+
+#define RK3399_SET_ODT_PD_0_SR_IDLE GENMASK(7, 0)
+#define RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE GENMASK(15, 8)
+#define RK3399_SET_ODT_PD_0_STANDBY_IDLE GENMASK(31, 16)
+
+#define RK3399_SET_ODT_PD_1_PD_IDLE GENMASK(11, 0)
+#define RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE GENMASK(27, 16)
+
+#define RK3399_SET_ODT_PD_2_ODT_ENABLE BIT(0)
struct rk3399_dmcfreq {
struct device *dev;
struct devfreq *devfreq;
+ struct devfreq_dev_profile profile;
struct devfreq_simple_ondemand_data ondemand_data;
struct clk *dmc_clk;
struct devfreq_event_dev *edev;
struct mutex lock;
- struct dram_timing timing;
struct regulator *vdd_center;
struct regmap *regmap_pmu;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
unsigned int odt_dis_freq;
- int odt_pd_arg0, odt_pd_arg1;
+
+ unsigned int pd_idle_ns;
+ unsigned int sr_idle_ns;
+ unsigned int sr_mc_gate_idle_ns;
+ unsigned int srpd_lite_idle_ns;
+ unsigned int standby_idle_ns;
+ unsigned int ddr3_odt_dis_freq;
+ unsigned int lpddr3_odt_dis_freq;
+ unsigned int lpddr4_odt_dis_freq;
+
+ unsigned int pd_idle_dis_freq;
+ unsigned int sr_idle_dis_freq;
+ unsigned int sr_mc_gate_idle_dis_freq;
+ unsigned int srpd_lite_idle_dis_freq;
+ unsigned int standby_idle_dis_freq;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -78,10 +73,14 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
struct dev_pm_opp *opp;
unsigned long old_clk_rate = dmcfreq->rate;
unsigned long target_volt, target_rate;
+ unsigned int ddrcon_mhz;
struct arm_smccc_res res;
- bool odt_enable = false;
int err;
+ u32 odt_pd_arg0 = 0;
+ u32 odt_pd_arg1 = 0;
+ u32 odt_pd_arg2 = 0;
+
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
@@ -95,19 +94,71 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
mutex_lock(&dmcfreq->lock);
+ /*
+ * Ensure power-domain transitions don't interfere with ARM Trusted
+ * Firmware power-domain idling.
+ */
+ err = rockchip_pmu_block();
+ if (err) {
+ dev_err(dev, "Failed to block PMU: %d\n", err);
+ goto out_unlock;
+ }
+
+ /*
+ * Some idle parameters may be based on the DDR controller clock, which
+ * is half of the DDR frequency.
+ * pd_idle and standby_idle are based on the controller clock cycle.
+ * sr_idle_cycle, sr_mc_gate_idle_cycle, and srpd_lite_idle_cycle
+ * are based on units of 1024 controller clock cycles.
+ */
+ ddrcon_mhz = target_rate / USEC_PER_SEC / 2;
+
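+ /*
+ * Worked example with hypothetical DT values: at target_rate = 800 MHz,
+ * ddrcon_mhz = 800000000 / USEC_PER_SEC / 2 = 400; sr_idle_ns = 2560
+ * then gives NS_TO_CYCLE(2560, 400) = 1024 cycles, i.e. one 1024-cycle
+ * unit after DIV_ROUND_UP.
+ */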
+ u32p_replace_bits(&odt_pd_arg1,
+ NS_TO_CYCLE(dmcfreq->pd_idle_ns, ddrcon_mhz),
+ RK3399_SET_ODT_PD_1_PD_IDLE);
+ u32p_replace_bits(&odt_pd_arg0,
+ NS_TO_CYCLE(dmcfreq->standby_idle_ns, ddrcon_mhz),
+ RK3399_SET_ODT_PD_0_STANDBY_IDLE);
+ u32p_replace_bits(&odt_pd_arg0,
+ DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->sr_idle_ns,
+ ddrcon_mhz), 1024),
+ RK3399_SET_ODT_PD_0_SR_IDLE);
+ u32p_replace_bits(&odt_pd_arg0,
+ DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->sr_mc_gate_idle_ns,
+ ddrcon_mhz), 1024),
+ RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE);
+ u32p_replace_bits(&odt_pd_arg1,
+ DIV_ROUND_UP(NS_TO_CYCLE(dmcfreq->srpd_lite_idle_ns,
+ ddrcon_mhz), 1024),
+ RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE);
+
if (dmcfreq->regmap_pmu) {
+ if (target_rate >= dmcfreq->sr_idle_dis_freq)
+ odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_SR_IDLE;
+
+ if (target_rate >= dmcfreq->sr_mc_gate_idle_dis_freq)
+ odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_SR_MC_GATE_IDLE;
+
+ if (target_rate >= dmcfreq->standby_idle_dis_freq)
+ odt_pd_arg0 &= ~RK3399_SET_ODT_PD_0_STANDBY_IDLE;
+
+ if (target_rate >= dmcfreq->pd_idle_dis_freq)
+ odt_pd_arg1 &= ~RK3399_SET_ODT_PD_1_PD_IDLE;
+
+ if (target_rate >= dmcfreq->srpd_lite_idle_dis_freq)
+ odt_pd_arg1 &= ~RK3399_SET_ODT_PD_1_SRPD_LITE_IDLE;
+
if (target_rate >= dmcfreq->odt_dis_freq)
- odt_enable = true;
+ odt_pd_arg2 |= RK3399_SET_ODT_PD_2_ODT_ENABLE;
/*
* This makes a SMC call to the TF-A to set the DDR PD
* (power-down) timings and to enable or disable the
* ODT (on-die termination) resistors.
*/
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
- dmcfreq->odt_pd_arg1,
- ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
- odt_enable, 0, 0, 0, &res);
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, odt_pd_arg0, odt_pd_arg1,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, odt_pd_arg2,
+ 0, 0, 0, &res);
}
/*
@@ -158,6 +209,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
dmcfreq->volt = target_volt;
out:
+ rockchip_pmu_unblock();
+out_unlock:
mutex_unlock(&dmcfreq->lock);
return err;
}
@@ -189,13 +242,6 @@ static int rk3399_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq)
return 0;
}
-static struct devfreq_dev_profile rk3399_devfreq_dmc_profile = {
- .polling_ms = 200,
- .target = rk3399_dmcfreq_target,
- .get_dev_status = rk3399_dmcfreq_get_dev_status,
- .get_cur_freq = rk3399_dmcfreq_get_cur_freq,
-};
-
static __maybe_unused int rk3399_dmcfreq_suspend(struct device *dev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev);
@@ -238,69 +284,48 @@ static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend,
rk3399_dmcfreq_resume);
-static int of_get_ddr_timings(struct dram_timing *timing,
- struct device_node *np)
+static int rk3399_dmcfreq_of_props(struct rk3399_dmcfreq *data,
+ struct device_node *np)
{
int ret = 0;
- ret = of_property_read_u32(np, "rockchip,ddr3_speed_bin",
- &timing->ddr3_speed_bin);
- ret |= of_property_read_u32(np, "rockchip,pd_idle",
- &timing->pd_idle);
- ret |= of_property_read_u32(np, "rockchip,sr_idle",
- &timing->sr_idle);
- ret |= of_property_read_u32(np, "rockchip,sr_mc_gate_idle",
- &timing->sr_mc_gate_idle);
- ret |= of_property_read_u32(np, "rockchip,srpd_lite_idle",
- &timing->srpd_lite_idle);
- ret |= of_property_read_u32(np, "rockchip,standby_idle",
- &timing->standby_idle);
- ret |= of_property_read_u32(np, "rockchip,auto_pd_dis_freq",
- &timing->auto_pd_dis_freq);
- ret |= of_property_read_u32(np, "rockchip,dram_dll_dis_freq",
- &timing->dram_dll_dis_freq);
- ret |= of_property_read_u32(np, "rockchip,phy_dll_dis_freq",
- &timing->phy_dll_dis_freq);
+ /*
+ * These are all optional, and serve as minimum bounds. Give them large
+ * (i.e., never "disabled") values if the DT doesn't specify one.
+ */
+ data->pd_idle_dis_freq =
+ data->sr_idle_dis_freq =
+ data->sr_mc_gate_idle_dis_freq =
+ data->srpd_lite_idle_dis_freq =
+ data->standby_idle_dis_freq = UINT_MAX;
+
+ ret |= of_property_read_u32(np, "rockchip,pd-idle-ns",
+ &data->pd_idle_ns);
+ ret |= of_property_read_u32(np, "rockchip,sr-idle-ns",
+ &data->sr_idle_ns);
+ ret |= of_property_read_u32(np, "rockchip,sr-mc-gate-idle-ns",
+ &data->sr_mc_gate_idle_ns);
+ ret |= of_property_read_u32(np, "rockchip,srpd-lite-idle-ns",
+ &data->srpd_lite_idle_ns);
+ ret |= of_property_read_u32(np, "rockchip,standby-idle-ns",
+ &data->standby_idle_ns);
ret |= of_property_read_u32(np, "rockchip,ddr3_odt_dis_freq",
- &timing->ddr3_odt_dis_freq);
- ret |= of_property_read_u32(np, "rockchip,ddr3_drv",
- &timing->ddr3_drv);
- ret |= of_property_read_u32(np, "rockchip,ddr3_odt",
- &timing->ddr3_odt);
- ret |= of_property_read_u32(np, "rockchip,phy_ddr3_ca_drv",
- &timing->phy_ddr3_ca_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_ddr3_dq_drv",
- &timing->phy_ddr3_dq_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_ddr3_odt",
- &timing->phy_ddr3_odt);
+ &data->ddr3_odt_dis_freq);
ret |= of_property_read_u32(np, "rockchip,lpddr3_odt_dis_freq",
- &timing->lpddr3_odt_dis_freq);
- ret |= of_property_read_u32(np, "rockchip,lpddr3_drv",
- &timing->lpddr3_drv);
- ret |= of_property_read_u32(np, "rockchip,lpddr3_odt",
- &timing->lpddr3_odt);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_ca_drv",
- &timing->phy_lpddr3_ca_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_dq_drv",
- &timing->phy_lpddr3_dq_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_odt",
- &timing->phy_lpddr3_odt);
+ &data->lpddr3_odt_dis_freq);
ret |= of_property_read_u32(np, "rockchip,lpddr4_odt_dis_freq",
- &timing->lpddr4_odt_dis_freq);
- ret |= of_property_read_u32(np, "rockchip,lpddr4_drv",
- &timing->lpddr4_drv);
- ret |= of_property_read_u32(np, "rockchip,lpddr4_dq_odt",
- &timing->lpddr4_dq_odt);
- ret |= of_property_read_u32(np, "rockchip,lpddr4_ca_odt",
- &timing->lpddr4_ca_odt);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ca_drv",
- &timing->phy_lpddr4_ca_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ck_cs_drv",
- &timing->phy_lpddr4_ck_cs_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_dq_drv",
- &timing->phy_lpddr4_dq_drv);
- ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_odt",
- &timing->phy_lpddr4_odt);
+ &data->lpddr4_odt_dis_freq);
+
+ ret |= of_property_read_u32(np, "rockchip,pd-idle-dis-freq-hz",
+ &data->pd_idle_dis_freq);
+ ret |= of_property_read_u32(np, "rockchip,sr-idle-dis-freq-hz",
+ &data->sr_idle_dis_freq);
+ ret |= of_property_read_u32(np, "rockchip,sr-mc-gate-idle-dis-freq-hz",
+ &data->sr_mc_gate_idle_dis_freq);
+ ret |= of_property_read_u32(np, "rockchip,srpd-lite-idle-dis-freq-hz",
+ &data->srpd_lite_idle_dis_freq);
+ ret |= of_property_read_u32(np, "rockchip,standby-idle-dis-freq-hz",
+ &data->standby_idle_dis_freq);
return ret;
}
@@ -311,8 +336,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node, *node;
struct rk3399_dmcfreq *data;
- int ret, index, size;
- uint32_t *timing;
+ int ret;
struct dev_pm_opp *opp;
u32 ddr_type;
u32 val;
@@ -343,26 +367,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
return ret;
}
- /*
- * Get dram timing and pass it to arm trust firmware,
- * the dram driver in arm trust firmware will get these
- * timing and to do dram initial.
- */
- if (!of_get_ddr_timings(&data->timing, np)) {
- timing = &data->timing.ddr3_speed_bin;
- size = sizeof(struct dram_timing) / 4;
- for (index = 0; index < size; index++) {
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index,
- ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
- 0, 0, 0, 0, &res);
- if (res.a0) {
- dev_err(dev, "Failed to set dram param: %ld\n",
- res.a0);
- ret = -EINVAL;
- goto err_edev;
- }
- }
- }
+ rk3399_dmcfreq_of_props(data, np);
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (!node)
@@ -381,13 +386,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
switch (ddr_type) {
case RK3399_PMUGRF_DDRTYPE_DDR3:
- data->odt_dis_freq = data->timing.ddr3_odt_dis_freq;
+ data->odt_dis_freq = data->ddr3_odt_dis_freq;
break;
case RK3399_PMUGRF_DDRTYPE_LPDDR3:
- data->odt_dis_freq = data->timing.lpddr3_odt_dis_freq;
+ data->odt_dis_freq = data->lpddr3_odt_dis_freq;
break;
case RK3399_PMUGRF_DDRTYPE_LPDDR4:
- data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ data->odt_dis_freq = data->lpddr4_odt_dis_freq;
break;
default:
ret = -EINVAL;
@@ -400,62 +405,45 @@ no_pmu:
0, 0, 0, 0, &res);
/*
- * In TF-A there is a platform SIP call to set the PD (power-down)
- * timings and to enable or disable the ODT (on-die termination).
- * This call needs three arguments as follows:
- *
- * arg0:
- * bit[0-7] : sr_idle
- * bit[8-15] : sr_mc_gate_idle
- * bit[16-31] : standby idle
- * arg1:
- * bit[0-11] : pd_idle
- * bit[16-27] : srpd_lite_idle
- * arg2:
- * bit[0] : odt enable
- */
- data->odt_pd_arg0 = (data->timing.sr_idle & 0xff) |
- ((data->timing.sr_mc_gate_idle & 0xff) << 8) |
- ((data->timing.standby_idle & 0xffff) << 16);
- data->odt_pd_arg1 = (data->timing.pd_idle & 0xfff) |
- ((data->timing.srpd_lite_idle & 0xfff) << 16);
-
- /*
* We add a devfreq driver to our parent since it has a device tree node
* with operating points.
*/
- if (dev_pm_opp_of_add_table(dev)) {
+ if (devm_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
ret = -EINVAL;
goto err_edev;
}
- of_property_read_u32(np, "upthreshold",
- &data->ondemand_data.upthreshold);
- of_property_read_u32(np, "downdifferential",
- &data->ondemand_data.downdifferential);
+ data->ondemand_data.upthreshold = 25;
+ data->ondemand_data.downdifferential = 15;
data->rate = clk_get_rate(data->dmc_clk);
opp = devfreq_recommended_opp(dev, &data->rate, 0);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
- goto err_free_opp;
+ goto err_edev;
}
data->rate = dev_pm_opp_get_freq(opp);
data->volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
- rk3399_devfreq_dmc_profile.initial_freq = data->rate;
+ data->profile = (struct devfreq_dev_profile) {
+ .polling_ms = 200,
+ .target = rk3399_dmcfreq_target,
+ .get_dev_status = rk3399_dmcfreq_get_dev_status,
+ .get_cur_freq = rk3399_dmcfreq_get_cur_freq,
+ .initial_freq = data->rate,
+ };
data->devfreq = devm_devfreq_add_device(dev,
- &rk3399_devfreq_dmc_profile,
+ &data->profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
if (IS_ERR(data->devfreq)) {
ret = PTR_ERR(data->devfreq);
- goto err_free_opp;
+ goto err_edev;
}
devm_devfreq_register_opp_notifier(dev, data->devfreq);
@@ -465,8 +453,6 @@ no_pmu:
return 0;
-err_free_opp:
- dev_pm_opp_of_remove_table(&pdev->dev);
err_edev:
devfreq_event_disable_edev(data->edev);
@@ -477,11 +463,7 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
- /*
- * Before remove the opp table we need to unregister the opp notifier.
- */
- devm_devfreq_unregister_opp_notifier(dmcfreq->dev, dmcfreq->devfreq);
- dev_pm_opp_of_remove_table(dmcfreq->dev);
+ devfreq_event_disable_edev(dmcfreq->edev);
return 0;
}
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
new file mode 100644
index 000000000000..13d32213139f
--- /dev/null
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020-2021 Samuel Holland <samuel@sholland.org>
+//
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define MBUS_CR 0x0000
+#define MBUS_CR_GET_DRAM_TYPE(x) (((x) >> 16) & 0x7)
+#define MBUS_CR_DRAM_TYPE_DDR2 2
+#define MBUS_CR_DRAM_TYPE_DDR3 3
+#define MBUS_CR_DRAM_TYPE_DDR4 4
+#define MBUS_CR_DRAM_TYPE_LPDDR2 6
+#define MBUS_CR_DRAM_TYPE_LPDDR3 7
+
+#define MBUS_TMR 0x000c
+#define MBUS_TMR_PERIOD(x) ((x) - 1)
+
+#define MBUS_PMU_CFG 0x009c
+#define MBUS_PMU_CFG_PERIOD(x) (((x) - 1) << 16)
+#define MBUS_PMU_CFG_UNIT (0x3 << 1)
+#define MBUS_PMU_CFG_UNIT_B (0x0 << 1)
+#define MBUS_PMU_CFG_UNIT_KB (0x1 << 1)
+#define MBUS_PMU_CFG_UNIT_MB (0x2 << 1)
+#define MBUS_PMU_CFG_ENABLE (0x1 << 0)
+
+#define MBUS_PMU_BWCR(n) (0x00a0 + (0x04 * (n)))
+
+#define MBUS_TOTAL_BWCR MBUS_PMU_BWCR(5)
+#define MBUS_TOTAL_BWCR_H616 MBUS_PMU_BWCR(13)
+
+#define MBUS_MDFSCR 0x0100
+#define MBUS_MDFSCR_BUFFER_TIMING (0x1 << 15)
+#define MBUS_MDFSCR_PAD_HOLD (0x1 << 13)
+#define MBUS_MDFSCR_BYPASS (0x1 << 4)
+#define MBUS_MDFSCR_MODE (0x1 << 1)
+#define MBUS_MDFSCR_MODE_DFS (0x0 << 1)
+#define MBUS_MDFSCR_MODE_CFS (0x1 << 1)
+#define MBUS_MDFSCR_START (0x1 << 0)
+
+#define MBUS_MDFSMRMR 0x0108
+
+#define DRAM_PWRCTL 0x0004
+#define DRAM_PWRCTL_SELFREF_EN (0x1 << 0)
+
+#define DRAM_RFSHTMG 0x0090
+#define DRAM_RFSHTMG_TREFI(x) ((x) << 16)
+#define DRAM_RFSHTMG_TRFC(x) ((x) << 0)
+
+#define DRAM_VTFCR 0x00b8
+#define DRAM_VTFCR_VTF_ENABLE (0x3 << 8)
+
+#define DRAM_ODTMAP 0x0120
+
+#define DRAM_DX_MAX 4
+
+#define DRAM_DXnGCR0(n) (0x0344 + 0x80 * (n))
+#define DRAM_DXnGCR0_DXODT (0x3 << 4)
+#define DRAM_DXnGCR0_DXODT_DYNAMIC (0x0 << 4)
+#define DRAM_DXnGCR0_DXODT_ENABLED (0x1 << 4)
+#define DRAM_DXnGCR0_DXODT_DISABLED (0x2 << 4)
+#define DRAM_DXnGCR0_DXEN (0x1 << 0)
+
+struct sun8i_a33_mbus_variant {
+ u32 min_dram_divider;
+ u32 max_dram_divider;
+ u32 odt_freq_mhz;
+};
+
+struct sun8i_a33_mbus {
+ const struct sun8i_a33_mbus_variant *variant;
+ void __iomem *reg_dram;
+ void __iomem *reg_mbus;
+ struct clk *clk_bus;
+ struct clk *clk_dram;
+ struct clk *clk_mbus;
+ struct devfreq *devfreq_dram;
+ struct devfreq_simple_ondemand_data gov_data;
+ struct devfreq_dev_profile profile;
+ u32 data_width;
+ u32 nominal_bw;
+ u32 odtmap;
+ u32 tREFI_ns;
+ u32 tRFC_ns;
+ unsigned long freq_table[];
+};
+
+/*
+ * The unit for this value is (MBUS clock cycles / MBUS_TMR_PERIOD). When
+ * MBUS_TMR_PERIOD is programmed to match the MBUS clock frequency in MHz, as
+ * it is during DRAM init and during probe, the resulting unit is microseconds.
+ */
+static int pmu_period = 50000;
+module_param(pmu_period, int, 0644);
+MODULE_PARM_DESC(pmu_period, "Bandwidth measurement period (microseconds)");
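+
+/*
+ * For example (illustrative numbers): with the MBUS clock at 400 MHz,
+ * MBUS_TMR_PERIOD is programmed to 400 cycles, so one timer tick lasts
+ * 1 microsecond and the default pmu_period of 50000 gives a 50 ms
+ * bandwidth measurement window.
+ */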
+
+static u32 sun8i_a33_mbus_get_peak_bw(struct sun8i_a33_mbus *priv)
+{
+ /* Returns the peak transfer (in KiB) during any single PMU period. */
+ return readl_relaxed(priv->reg_mbus + MBUS_TOTAL_BWCR);
+}
+
+static void sun8i_a33_mbus_restart_pmu_counters(struct sun8i_a33_mbus *priv)
+{
+ u32 pmu_cfg = MBUS_PMU_CFG_PERIOD(pmu_period) | MBUS_PMU_CFG_UNIT_KB;
+
+ /* All PMU counters are cleared on a disable->enable transition. */
+ writel_relaxed(pmu_cfg,
+ priv->reg_mbus + MBUS_PMU_CFG);
+ writel_relaxed(pmu_cfg | MBUS_PMU_CFG_ENABLE,
+ priv->reg_mbus + MBUS_PMU_CFG);
+}
+
+static void sun8i_a33_mbus_update_nominal_bw(struct sun8i_a33_mbus *priv,
+ u32 ddr_freq_mhz)
+{
+ /*
+ * Nominal bandwidth (KiB per PMU period):
+ *
+ * DDR transfers microseconds KiB
+ * ------------- * ------------ * --------
+ * microsecond PMU period transfer
+ */
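+ /*
+ * For example (illustrative numbers): a 512 MHz DDR transfer rate on a
+ * 4-byte-wide bus with the default 50000 us period gives
+ * 512 * 50000 * 4 / 1024 = 100000 KiB per PMU period.
+ */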
+ priv->nominal_bw = ddr_freq_mhz * pmu_period * priv->data_width / 1024;
+}
+
+static int sun8i_a33_mbus_set_dram_freq(struct sun8i_a33_mbus *priv,
+ unsigned long freq)
+{
+ u32 ddr_freq_mhz = freq / USEC_PER_SEC; /* DDR */
+ u32 dram_freq_mhz = ddr_freq_mhz / 2; /* SDR */
+ u32 mctl_freq_mhz = dram_freq_mhz / 2; /* HDR */
+ u32 dxodt, mdfscr, pwrctl, vtfcr;
+ u32 i, tREFI_32ck, tRFC_ck;
+ int ret;
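+
+ /*
+ * The requested freq is the DDR transfer rate: for example, an 800 MHz
+ * request corresponds to a 400 MHz DRAM clock (SDR) and a 200 MHz
+ * memory controller clock (HDR).
+ */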
+
+ /* The rate change is not effective until the MDFS process runs. */
+ ret = clk_set_rate(priv->clk_dram, freq);
+ if (ret)
+ return ret;
+
+ /* Disable automatic self-refresh and VTF before starting MDFS. */
+ pwrctl = readl_relaxed(priv->reg_dram + DRAM_PWRCTL) &
+ ~DRAM_PWRCTL_SELFREF_EN;
+ writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);
+ vtfcr = readl_relaxed(priv->reg_dram + DRAM_VTFCR);
+ writel_relaxed(vtfcr & ~DRAM_VTFCR_VTF_ENABLE,
+ priv->reg_dram + DRAM_VTFCR);
+
+ /* Set up MDFS and enable double buffering for timing registers. */
+ mdfscr = MBUS_MDFSCR_MODE_DFS |
+ MBUS_MDFSCR_BYPASS |
+ MBUS_MDFSCR_PAD_HOLD |
+ MBUS_MDFSCR_BUFFER_TIMING;
+ writel(mdfscr, priv->reg_mbus + MBUS_MDFSCR);
+
+ /* Update the buffered copy of RFSHTMG. */
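+ /*
+ * For example, with the DDR3 timings chosen in sun8i_a33_mbus_hw_init()
+ * (tREFI = 7800 ns, tRFC = 350 ns) and a 200 MHz controller clock, this
+ * programs tREFI = 7800 * 200 / 1000 / 32 = 48 (in units of 32 clocks,
+ * truncated) and tRFC = DIV_ROUND_UP(350 * 200, 1000) = 70 clocks.
+ */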
+ tREFI_32ck = priv->tREFI_ns * mctl_freq_mhz / 1000 / 32;
+ tRFC_ck = DIV_ROUND_UP(priv->tRFC_ns * mctl_freq_mhz, 1000);
+ writel(DRAM_RFSHTMG_TREFI(tREFI_32ck) | DRAM_RFSHTMG_TRFC(tRFC_ck),
+ priv->reg_dram + DRAM_RFSHTMG);
+
+ /* Enable ODT if needed, or disable it to save power. */
+ if (priv->odtmap && dram_freq_mhz > priv->variant->odt_freq_mhz) {
+ dxodt = DRAM_DXnGCR0_DXODT_DYNAMIC;
+ writel(priv->odtmap, priv->reg_dram + DRAM_ODTMAP);
+ } else {
+ dxodt = DRAM_DXnGCR0_DXODT_DISABLED;
+ writel(0, priv->reg_dram + DRAM_ODTMAP);
+ }
+ for (i = 0; i < DRAM_DX_MAX; ++i) {
+ void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);
+
+ writel((readl(reg) & ~DRAM_DXnGCR0_DXODT) | dxodt, reg);
+ }
+
+ dev_dbg(priv->devfreq_dram->dev.parent,
+ "Setting DRAM to %u MHz, tREFI=%u, tRFC=%u, ODT=%s\n",
+ dram_freq_mhz, tREFI_32ck, tRFC_ck,
+ dxodt == DRAM_DXnGCR0_DXODT_DYNAMIC ? "dynamic" : "disabled");
+
+ /* Trigger hardware MDFS. */
+ writel(mdfscr | MBUS_MDFSCR_START, priv->reg_mbus + MBUS_MDFSCR);
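+ /* Poll every 10 us (1 ms timeout) for the hardware to clear the START bit. */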
+ ret = readl_poll_timeout_atomic(priv->reg_mbus + MBUS_MDFSCR, mdfscr,
+ !(mdfscr & MBUS_MDFSCR_START), 10, 1000);
+ if (ret)
+ return ret;
+
+ /* Disable double buffering. */
+ writel(0, priv->reg_mbus + MBUS_MDFSCR);
+
+ /* Restore VTF configuration. */
+ writel_relaxed(vtfcr, priv->reg_dram + DRAM_VTFCR);
+
+ /* Enable automatic self-refresh at the lowest frequency only. */
+ if (freq == priv->freq_table[0])
+ pwrctl |= DRAM_PWRCTL_SELFREF_EN;
+ writel_relaxed(pwrctl, priv->reg_dram + DRAM_PWRCTL);
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+ sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq_mhz);
+
+ return 0;
+}
+
+static int sun8i_a33_mbus_set_dram_target(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+ struct devfreq *devfreq = priv->devfreq_dram;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ dev_pm_opp_put(opp);
+
+ if (*freq == devfreq->previous_freq)
+ return 0;
+
+ ret = sun8i_a33_mbus_set_dram_freq(priv, *freq);
+ if (ret) {
+ dev_warn(dev, "failed to set DRAM frequency: %d\n", ret);
+ *freq = devfreq->previous_freq;
+ }
+
+ return ret;
+}
+
+static int sun8i_a33_mbus_get_dram_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
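+ /*
+ * Report peak bandwidth as the "busy" time and nominal bandwidth as the
+ * "total" time: the simple_ondemand governor only compares their ratio
+ * against its thresholds, so any consistent unit works.
+ */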
+ stat->busy_time = sun8i_a33_mbus_get_peak_bw(priv);
+ stat->total_time = priv->nominal_bw;
+ stat->current_frequency = priv->devfreq_dram->previous_freq;
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+
+ dev_dbg(dev, "Using %lu/%lu (%lu%%) at %lu MHz\n",
+ stat->busy_time, stat->total_time,
+ DIV_ROUND_CLOSEST(stat->busy_time * 100, stat->total_time),
+ stat->current_frequency / USEC_PER_SEC);
+
+ return 0;
+}
+
+static int sun8i_a33_mbus_hw_init(struct device *dev,
+ struct sun8i_a33_mbus *priv,
+ unsigned long ddr_freq)
+{
+ u32 i, mbus_cr, mbus_freq_mhz;
+
+ /* Choose tREFI and tRFC to match the configured DRAM type. */
+ mbus_cr = readl_relaxed(priv->reg_mbus + MBUS_CR);
+ switch (MBUS_CR_GET_DRAM_TYPE(mbus_cr)) {
+ case MBUS_CR_DRAM_TYPE_DDR2:
+ case MBUS_CR_DRAM_TYPE_DDR3:
+ case MBUS_CR_DRAM_TYPE_DDR4:
+ priv->tREFI_ns = 7800;
+ priv->tRFC_ns = 350;
+ break;
+ case MBUS_CR_DRAM_TYPE_LPDDR2:
+ case MBUS_CR_DRAM_TYPE_LPDDR3:
+ priv->tREFI_ns = 3900;
+ priv->tRFC_ns = 210;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Save ODTMAP so it can be restored when raising the frequency. */
+ priv->odtmap = readl_relaxed(priv->reg_dram + DRAM_ODTMAP);
+
+ /* Compute the DRAM data bus width by counting enabled DATx8 blocks. */
+ for (i = 0; i < DRAM_DX_MAX; ++i) {
+ void __iomem *reg = priv->reg_dram + DRAM_DXnGCR0(i);
+
+ if (!(readl_relaxed(reg) & DRAM_DXnGCR0_DXEN))
+ break;
+ }
+ priv->data_width = i;
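+ /* data_width counts enabled byte lanes (bytes per transfer): a 32-bit bus gives 4. */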
+
+ dev_dbg(dev, "Detected %u-bit %sDDRx with%s ODT\n",
+ priv->data_width * 8,
+ MBUS_CR_GET_DRAM_TYPE(mbus_cr) > 4 ? "LP" : "",
+ priv->odtmap ? "" : "out");
+
+ /* Program MBUS_TMR such that the PMU period unit is microseconds. */
+ mbus_freq_mhz = clk_get_rate(priv->clk_mbus) / USEC_PER_SEC;
+ writel_relaxed(MBUS_TMR_PERIOD(mbus_freq_mhz),
+ priv->reg_mbus + MBUS_TMR);
+
+ /* "Master Ready Mask Register" bits must be set or MDFS will block. */
+ writel_relaxed(0xffffffff, priv->reg_mbus + MBUS_MDFSMRMR);
+
+ sun8i_a33_mbus_restart_pmu_counters(priv);
+ sun8i_a33_mbus_update_nominal_bw(priv, ddr_freq / USEC_PER_SEC);
+
+ return 0;
+}
+
+static int __maybe_unused sun8i_a33_mbus_suspend(struct device *dev)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_bus);
+
+ return 0;
+}
+
+static int __maybe_unused sun8i_a33_mbus_resume(struct device *dev)
+{
+ struct sun8i_a33_mbus *priv = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(priv->clk_bus);
+}
+
+static int sun8i_a33_mbus_probe(struct platform_device *pdev)
+{
+ const struct sun8i_a33_mbus_variant *variant;
+ struct device *dev = &pdev->dev;
+ struct sun8i_a33_mbus *priv;
+ unsigned long base_freq;
+ unsigned int max_state;
+ const char *err;
+ int i, ret;
+
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+
+ max_state = variant->max_dram_divider - variant->min_dram_divider + 1;
+
+ priv = devm_kzalloc(dev, struct_size(priv, freq_table, max_state), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->variant = variant;
+
+ priv->reg_dram = devm_platform_ioremap_resource_byname(pdev, "dram");
+ if (IS_ERR(priv->reg_dram))
+ return PTR_ERR(priv->reg_dram);
+
+ priv->reg_mbus = devm_platform_ioremap_resource_byname(pdev, "mbus");
+ if (IS_ERR(priv->reg_mbus))
+ return PTR_ERR(priv->reg_mbus);
+
+ priv->clk_bus = devm_clk_get(dev, "bus");
+ if (IS_ERR(priv->clk_bus))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
+ "failed to get bus clock\n");
+
+ priv->clk_dram = devm_clk_get(dev, "dram");
+ if (IS_ERR(priv->clk_dram))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_dram),
+ "failed to get dram clock\n");
+
+ priv->clk_mbus = devm_clk_get(dev, "mbus");
+ if (IS_ERR(priv->clk_mbus))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
+ "failed to get mbus clock\n");
+
+ ret = clk_prepare_enable(priv->clk_bus);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable bus clock\n");
+
+ /* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
+ ret = clk_rate_exclusive_get(priv->clk_dram);
+ if (ret) {
+ err = "failed to lock dram clock rate\n";
+ goto err_disable_bus;
+ }
+
+ /* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
+ ret = clk_rate_exclusive_get(priv->clk_mbus);
+ if (ret) {
+ err = "failed to lock mbus clock rate\n";
+ goto err_unlock_dram;
+ }
+
+ priv->gov_data.upthreshold = 10;
+ priv->gov_data.downdifferential = 5;
+
+ priv->profile.initial_freq = clk_get_rate(priv->clk_dram);
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = sun8i_a33_mbus_set_dram_target;
+ priv->profile.get_dev_status = sun8i_a33_mbus_get_dram_status;
+ priv->profile.freq_table = priv->freq_table;
+ priv->profile.max_state = max_state;
+
+ ret = devm_pm_opp_set_clkname(dev, "dram");
+ if (ret) {
+ err = "failed to add OPP table\n";
+ goto err_unlock_mbus;
+ }
+
+ base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
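+ /*
+ * Build the OPP/frequency table in ascending order. For the A64 variant
+ * (dividers 1..4) and an illustrative 1200 MHz parent clock this yields
+ * 300, 400, 600 and 1200 MHz.
+ */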
+ for (i = 0; i < max_state; ++i) {
+ unsigned int div = variant->max_dram_divider - i;
+
+ priv->freq_table[i] = base_freq / div;
+
+ ret = dev_pm_opp_add(dev, priv->freq_table[i], 0);
+ if (ret) {
+ err = "failed to add OPPs\n";
+ goto err_remove_opps;
+ }
+ }
+
+ ret = sun8i_a33_mbus_hw_init(dev, priv, priv->profile.initial_freq);
+ if (ret) {
+ err = "failed to init hardware\n";
+ goto err_remove_opps;
+ }
+
+ priv->devfreq_dram = devfreq_add_device(dev, &priv->profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &priv->gov_data);
+ if (IS_ERR(priv->devfreq_dram)) {
+ ret = PTR_ERR(priv->devfreq_dram);
+ err = "failed to add devfreq device\n";
+ goto err_remove_opps;
+ }
+
+ /*
+ * This must be set manually after registering the devfreq device,
+ * because there is no way to select a dynamic OPP as the suspend OPP.
+ */
+ priv->devfreq_dram->suspend_freq = priv->freq_table[0];
+
+ return 0;
+
+err_remove_opps:
+ dev_pm_opp_remove_all_dynamic(dev);
+err_unlock_mbus:
+ clk_rate_exclusive_put(priv->clk_mbus);
+err_unlock_dram:
+ clk_rate_exclusive_put(priv->clk_dram);
+err_disable_bus:
+ clk_disable_unprepare(priv->clk_bus);
+
+ return dev_err_probe(dev, ret, err);
+}
+
+static int sun8i_a33_mbus_remove(struct platform_device *pdev)
+{
+ struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
+ unsigned long initial_freq = priv->profile.initial_freq;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ devfreq_remove_device(priv->devfreq_dram);
+
+ ret = sun8i_a33_mbus_set_dram_freq(priv, initial_freq);
+ if (ret)
+ dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
+
+ dev_pm_opp_remove_all_dynamic(dev);
+ clk_rate_exclusive_put(priv->clk_mbus);
+ clk_rate_exclusive_put(priv->clk_dram);
+ clk_disable_unprepare(priv->clk_bus);
+
+ return 0;
+}
+
+static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
+ .min_dram_divider = 1,
+ .max_dram_divider = 4,
+ .odt_freq_mhz = 400,
+};
+
+static const struct of_device_id sun8i_a33_mbus_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-mbus", .data = &sun50i_a64_mbus },
+ { .compatible = "allwinner,sun50i-h5-mbus", .data = &sun50i_a64_mbus },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun8i_a33_mbus_of_match);
+
+static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
+ sun8i_a33_mbus_suspend, sun8i_a33_mbus_resume);
+
+static struct platform_driver sun8i_a33_mbus_driver = {
+ .probe = sun8i_a33_mbus_probe,
+ .remove = sun8i_a33_mbus_remove,
+ .driver = {
+ .name = "sun8i-a33-mbus",
+ .of_match_table = sun8i_a33_mbus_of_match,
+ .pm = pm_ptr(&sun8i_a33_mbus_pm_ops),
+ },
+};
+module_platform_driver(sun8i_a33_mbus_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun8i/sun50i MBUS DEVFREQ Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 65ecf17a36f4..503376b894b6 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -821,6 +821,15 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,
return err;
}
+static int tegra_devfreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ /* We want to skip clk configuration via dev_pm_opp_set_opp() */
+ return 0;
+}
+
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -830,6 +839,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
unsigned int i;
long rate;
int err;
+ const char *clk_names[] = { "actmon", NULL };
+ struct dev_pm_opp_config config = {
+ .supported_hw = &hw_version,
+ .supported_hw_count = 1,
+ .clk_names = clk_names,
+ .config_clks = tegra_devfreq_config_clks_nop,
+ };
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
@@ -874,13 +890,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(&pdev->dev, &config);
if (err) {
- dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
+ dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
return err;
}
- err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
+ err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
return err;
@@ -922,8 +938,10 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
"tegra_actmon", NULL);
- if (IS_ERR(devfreq))
+ if (IS_ERR(devfreq)) {
+ dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq);
return PTR_ERR(devfreq);
+ }
return 0;
}