author    Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>  2023-10-06 13:39:58 +0300
committer Geert Uytterhoeven <geert+renesas@glider.be>       2023-10-10 09:27:08 +0200
commit    62b1feac485866494f111e3a6aa4a9ae03a7a2b9 (patch)
tree      6de93893edf441bc786fd46e005873aafa7dcf83 /drivers/clk/renesas
parent    clk: renesas: rzg2l: Refactor SD mux driver (diff)
clk: renesas: rzg2l: Add divider clock for RZ/G3S
Add a divider clock driver for RZ/G3S. This will be used on RZ/G3S for the SDHI, SPI, OCTA, I, I2, I3, P0, P1, P2, and P3 core clocks. The divider has some limitations for the SDHI, OCTA, and SPI clocks:
- SDHI div cannot be 1 if the parent rate is 800MHz,
- OCTA, SPI div cannot be 1 if the parent rate is 400MHz.
To handle these limitations, a notifier is registered from the platform-specific clock driver, which makes sure proper actions are taken before the clock rate is changed, when needed.

Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Link: https://lore.kernel.org/r/20231006103959.197485-4-claudiu.beznea.uj@bp.renesas.com
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
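For context, a rough sketch of how a platform-specific clock driver could describe such a divider with the new DEF_G3S_DIV macro and hook up the notifier. The clock IDs, register conf/monitor macros, divider table, and flags below are illustrative placeholders, not taken from this patch:

/*
 * Hypothetical core-clock table entry for an RZ/G3S-style SoC driver.
 * EXAMPLE_CLK_SD0, EXAMPLE_CLK_SEL_SD0, EXAMPLE_DIVSDHI0_CONF and
 * EXAMPLE_DIVSDHI0_STS stand in for the real clock IDs and register/monitor
 * configuration values of the target SoC.
 */
static const struct clk_div_table example_sd_div_table[] = {
	{ 0, 1 },	/* val 0 selects a divider of 1 */
	{ 1, 2 },
	{ 2, 4 },
	{ 0, 0 },	/* sentinel */
};

static const struct cpg_core_clk example_core_clks[] __initconst = {
	/*
	 * A divider of 1 is not allowed when the (single) 800 MHz parent is
	 * selected, so 800 MHz is passed as invalid_rate and the RZ/G3S
	 * divider notifier is registered to enforce the limit before the
	 * rate changes.
	 */
	DEF_G3S_DIV("SD0", EXAMPLE_CLK_SD0, EXAMPLE_CLK_SEL_SD0,
		    EXAMPLE_DIVSDHI0_CONF, EXAMPLE_DIVSDHI0_STS,
		    example_sd_div_table, 800000000, 0, CLK_SET_RATE_PARENT,
		    rzg3s_cpg_div_clk_notifier),
};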
Diffstat (limited to 'drivers/clk/renesas')
-rw-r--r--	drivers/clk/renesas/rzg2l-cpg.c	186
-rw-r--r--	drivers/clk/renesas/rzg2l-cpg.h	11
2 files changed, 197 insertions, 0 deletions
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 8f927568a3c8..49dc6c5c0b0c 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -93,6 +93,24 @@ struct sd_mux_hw_data {
#define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data)
+/**
+ * struct div_hw_data - divider clock hardware data
+ * @hw_data: clock hw data
+ * @dtable: pointer to divider table
+ * @invalid_rate: invalid rate for divider
+ * @max_rate: maximum rate for divider
+ * @width: divider width
+ */
+struct div_hw_data {
+ struct clk_hw_data hw_data;
+ const struct clk_div_table *dtable;
+ unsigned long invalid_rate;
+ unsigned long max_rate;
+ u32 width;
+};
+
+#define to_div_hw_data(_hw) container_of(_hw, struct div_hw_data, hw_data)
+
struct rzg2l_pll5_param {
u32 pl5_fracin;
u8 pl5_refdiv;
@@ -201,6 +219,53 @@ int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event
return notifier_from_errno(ret);
}
+int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct clk_hw *hw = __clk_get_hw(cnd->clk);
+ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+ struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
+ struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+ u32 off = GET_REG_OFFSET(clk_hw_data->conf);
+ u32 shift = GET_SHIFT(clk_hw_data->conf);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
+ div_hw_data->invalid_rate % cnd->new_rate)
+ return NOTIFY_DONE;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+
+ val = readl(priv->base + off);
+ val >>= shift;
+ val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
+
+ /*
+ * There are different constraints for the users of this notifier, as follows:
+ * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
+ * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
+ * As SD can have only one parent running at 800MHz, and OCTA/SPI can
+ * have only one parent running at 400MHz, the parent rate is taken into
+ * account at the beginning of the function (by checking invalid_rate % new_rate).
+ * Now it is time to check the hardware divider and update it accordingly.
+ */
+ if (!val) {
+ writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
+ /* Wait for the update to complete. */
+ ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
+ }
+
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+
+ if (ret)
+ dev_err(priv->dev, "Failed to downgrade the div\n");
+
+ return notifier_from_errno(ret);
+}
+
static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
struct rzg2l_cpg_priv *priv)
{
@@ -218,6 +283,124 @@ static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk
return clk_notifier_register(hw->clk, nb);
}
+static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+ struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
+ struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+ u32 val;
+
+ val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
+ val >>= GET_SHIFT(clk_hw_data->conf);
+ val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
+
+ return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
+ CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
+}
+
+static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+ struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
+
+ if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
+ req->rate = div_hw_data->max_rate;
+
+ return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+ struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
+ struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+ u32 off = GET_REG_OFFSET(clk_hw_data->conf);
+ u32 shift = GET_SHIFT(clk_hw_data->conf);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ writel((CPG_WEN_BIT | val) << shift, priv->base + off);
+ /* Wait for the update to complete. */
+ ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops rzg3s_div_clk_ops = {
+ .recalc_rate = rzg3s_div_clk_recalc_rate,
+ .determine_rate = rzg3s_div_clk_determine_rate,
+ .set_rate = rzg3s_div_clk_set_rate,
+};
+
+static struct clk * __init
+rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
+ void __iomem *base, struct rzg2l_cpg_priv *priv)
+{
+ struct div_hw_data *div_hw_data;
+ struct clk_init_data init = {};
+ const struct clk_div_table *clkt;
+ struct clk_hw *clk_hw;
+ const struct clk *parent;
+ const char *parent_name;
+ u32 max = 0;
+ int ret;
+
+ parent = clks[core->parent & 0xffff];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
+ if (!div_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.flags = core->flag;
+ init.ops = &rzg3s_div_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ /* Get the maximum divider to retrieve div width. */
+ for (clkt = core->dtable; clkt->div; clkt++) {
+ if (max < clkt->div)
+ max = clkt->div;
+ }
+
+ div_hw_data->hw_data.priv = priv;
+ div_hw_data->hw_data.conf = core->conf;
+ div_hw_data->hw_data.sconf = core->sconf;
+ div_hw_data->dtable = core->dtable;
+ div_hw_data->invalid_rate = core->invalid_rate;
+ div_hw_data->max_rate = core->max_rate;
+ div_hw_data->width = fls(max) - 1;
+
+ clk_hw = &div_hw_data->hw_data.hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = rzg2l_register_notifier(clk_hw, core, priv);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register notifier for %s\n",
+ core->name);
+ return ERR_PTR(ret);
+ }
+
+ return clk_hw->clk;
+}
+
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
struct clk **clks,
@@ -963,6 +1146,9 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
clk = rzg2l_cpg_div_clk_register(core, priv->clks,
priv->base, priv);
break;
+ case CLK_TYPE_G3S_DIV:
+ clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
+ break;
case CLK_TYPE_MUX:
clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
break;
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index f1910913b29a..4755befaf38e 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -89,6 +89,8 @@ struct cpg_core_clk {
unsigned int sconf;
const struct clk_div_table *dtable;
const u32 *mtable;
+ const unsigned long invalid_rate;
+ const unsigned long max_rate;
const char * const *parent_names;
notifier_fn_t notifier;
u32 flag;
@@ -105,6 +107,7 @@ enum clk_types {
/* Clock with divider */
CLK_TYPE_DIV,
+ CLK_TYPE_G3S_DIV,
/* Clock with clock source selector */
CLK_TYPE_MUX,
@@ -143,6 +146,13 @@ enum clk_types {
DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \
.parent = _parent, .dtable = _dtable, \
.flag = CLK_DIVIDER_READ_ONLY)
+#define DEF_G3S_DIV(_name, _id, _parent, _conf, _sconf, _dtable, _invalid_rate, \
+ _max_rate, _clk_flags, _notif) \
+ DEF_TYPE(_name, _id, CLK_TYPE_G3S_DIV, .conf = _conf, .sconf = _sconf, \
+ .parent = _parent, .dtable = _dtable, \
+ .invalid_rate = _invalid_rate, \
+ .max_rate = _max_rate, .flag = (_clk_flags), \
+ .notifier = _notif)
#define DEF_MUX(_name, _id, _conf, _parent_names) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \
.parent_names = _parent_names, \
@@ -277,5 +287,6 @@ extern const struct rzg2l_cpg_info r9a07g054_cpg_info;
extern const struct rzg2l_cpg_info r9a09g011_cpg_info;
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data);
+int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event, void *data);
#endif
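As a side note on the notifier above: the early bail-out relies on the fact that only one selectable parent runs at the problematic rate, so a new divider-clock rate that divides invalid_rate evenly is the only case worth inspecting further. A minimal standalone sketch of that arithmetic, with illustrative rates only:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the early check in rzg3s_cpg_div_clk_notifier():
 * proceed only when a constraint exists (invalid_rate != 0) and the new
 * rate divides it evenly, i.e. the new rate could be produced from the
 * one parent running at invalid_rate (e.g. 800 MHz for SDHI).
 */
static bool constraint_applies(unsigned long invalid_rate, unsigned long new_rate)
{
	return invalid_rate && !(invalid_rate % new_rate);
}

int main(void)
{
	/* 400 MHz divides 800 MHz evenly: the divider value must be checked. */
	printf("%d\n", constraint_applies(800000000, 400000000));	/* prints 1 */
	/* 533 MHz does not: the 800 MHz parent cannot be the source. */
	printf("%d\n", constraint_applies(800000000, 533000000));	/* prints 0 */
	return 0;
}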