author		Rahul Tanwar <rahul.tanwar@linux.intel.com>	2020-07-16 14:30:31 +0800
committer	Stephen Boyd <sboyd@kernel.org>	2020-07-24 01:55:18 -0700
commit		c9e28fe649f76ee13a9bcc09f89ae0189f85801b (patch)
tree		358b8cfef6585fc096def2b5d43a2221a35be55c /drivers/clk/x86
parent		clk: intel: Use devm_clk_hw_register() instead of clk_hw_register() (diff)
download	wireguard-linux-c9e28fe649f76ee13a9bcc09f89ae0189f85801b.tar.xz
		wireguard-linux-c9e28fe649f76ee13a9bcc09f89ae0189f85801b.zip
clk: intel: Improve locking in the driver
Remove/reduce unnecessary spin locking of the code.

Signed-off-by: Rahul Tanwar <rahul.tanwar@linux.intel.com>
Link: https://lore.kernel.org/r/79c0f5f9f5bc512a7e2b5f3c91f6341f28b5854c.1594880946.git.rahul.tanwar@linux.intel.com
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
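The patch follows the usual pattern of shrinking spinlock critical sections: a plain register read feeding pure arithmetic can run lock-free, and only a read-modify-write update of a shared register needs to stay under the lock. A minimal sketch of that pattern, using a hypothetical divider-clock struct and register layout rather than the actual clk-cgu helpers:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical divider clock; not the clk-cgu structures. */
struct hyp_div_clk {
	void __iomem *membase;
	u32 reg;
	spinlock_t lock;	/* serializes read-modify-write of 'reg' */
};

/* A single 32-bit MMIO read is atomic on its own, so the rate can be
 * recalculated without taking the lock at all.
 */
static unsigned long hyp_div_recalc_rate(struct hyp_div_clk *clk,
					 unsigned long parent_rate)
{
	u32 div = readl(clk->membase + clk->reg) & 0xff;

	return parent_rate / (div + 1);
}

/* Updating one field of the shared register is a read-modify-write and
 * still needs the lock, but nothing else does.
 */
static void hyp_div_set(struct hyp_div_clk *clk, u32 div)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&clk->lock, flags);
	val = readl(clk->membase + clk->reg);
	val = (val & ~0xffu) | (div & 0xff);
	writel(val, clk->membase + clk->reg);
	spin_unlock_irqrestore(&clk->lock, flags);
}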
Diffstat (limited to 'drivers/clk/x86')
-rw-r--r--	drivers/clk/x86/clk-cgu.c	17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 88ebeb53b109..c379fedfb9f2 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -420,18 +420,14 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
 	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
 	unsigned int div0, div1, exdiv;
-	unsigned long flags;
 	u64 prate;
 
-	spin_lock_irqsave(&ddiv->lock, flags);
 	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
 			       ddiv->shift0, ddiv->width0) + 1;
 	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
 			       ddiv->shift1, ddiv->width1) + 1;
 	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
 				ddiv->shift2, ddiv->width2);
-	spin_unlock_irqrestore(&ddiv->lock, flags);
-
 	prate = (u64)parent_rate;
 	do_div(prate, div0);
 	do_div(prate, div1);
@@ -548,24 +544,21 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
 		div = div * 2;
 		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
 	}
+	spin_unlock_irqrestore(&ddiv->lock, flags);
 
-	if (div <= 0) {
-		spin_unlock_irqrestore(&ddiv->lock, flags);
+	if (div <= 0)
 		return *prate;
-	}
 
-	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
-		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
-			spin_unlock_irqrestore(&ddiv->lock, flags);
+	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
+		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
 			return -EINVAL;
-		}
-	}
 
 	rate64 = *prate;
 	do_div(rate64, ddiv1);
 	do_div(rate64, ddiv2);
 
 	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+	spin_lock_irqsave(&ddiv->lock, flags);
 	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
 		rate64 = rate64 * 2;
 		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);