author		2025-06-06 20:02:51 -0700
committer	2025-06-06 20:02:51 -0700
commit		949ea6f3f4c016852406bfdd3374e2ba5d4c30a9 (patch)
tree		e2513f4b07712e5fa00752efd43cdae7669d2ba0 /drivers
parent		Merge tag 'riscv-for-linus-6.16-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux (diff)
parent		scsi: ufs: qcom: Prevent calling phy_exit() before phy_init() (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI fixes from James Bottomley:
"Mostly trivial updates and bug fixes (core update is a comment
spelling fix).
The bigger UFS updates are the clock scaling and frequency fixes"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: ufs: qcom: Prevent calling phy_exit() before phy_init()
scsi: ufs: qcom: Call ufs_qcom_cfg_timers() in clock scaling path
scsi: ufs: qcom: Map devfreq OPP freq to UniPro Core Clock freq
scsi: ufs: qcom: Check gear against max gear in vop freq_to_gear()
scsi: aacraid: Remove useless code
scsi: core: devinfo: Fix typo in comment
scsi: ufs: core: Don't perform UFS clkscaling during host async scan
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/aacraid/aacraid.h	1
-rw-r--r--	drivers/scsi/aacraid/commsup.c	10
-rw-r--r--	drivers/scsi/scsi_devinfo.c	2
-rw-r--r--	drivers/ufs/core/ufshcd.c	3
-rw-r--r--	drivers/ufs/host/ufs-qcom.c	141
5 files changed, 110 insertions, 47 deletions
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8c384c25dca1..0a5888b53d6d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -93,7 +93,6 @@ enum {
 
 #define AAC_NUM_MGT_FIB 8
 #define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
-#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
 
 #define AAC_MAX_LUN 256
 
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ffef61c4aa01..7d9a4dce236b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -48,15 +48,7 @@
 
 static int fib_map_alloc(struct aac_dev *dev)
 {
-	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
-		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
-	else
-		dev->max_cmd_size = dev->max_fib_size;
-	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
-		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
-	} else {
-		dev->max_cmd_size = dev->max_fib_size;
-	}
+	dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
 
 	dprintk((KERN_INFO
 	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a348df895dca..8ff679e67bbf 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -836,7 +836,7 @@ int __init scsi_init_devinfo(void)
 		goto out;
 
 	for (i = 0; scsi_static_device_list[i].vendor; i++) {
-		error = scsi_dev_info_list_add(1 /* compatibile */,
+		error = scsi_dev_info_list_add(1 /* compatible */,
 				scsi_static_device_list[i].vendor,
 				scsi_static_device_list[i].model,
 				NULL,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 76cedd30c274..4410e7d93b7d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1397,6 +1397,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
+	mutex_lock(&hba->host->scan_mutex);
 	blk_mq_quiesce_tagset(&hba->host->tag_set);
 	mutex_lock(&hba->wb_mutex);
 	down_write(&hba->clk_scaling_lock);
@@ -1407,6 +1408,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 		up_write(&hba->clk_scaling_lock);
 		mutex_unlock(&hba->wb_mutex);
 		blk_mq_unquiesce_tagset(&hba->host->tag_set);
+		mutex_unlock(&hba->host->scan_mutex);
 		goto out;
 	}
 
@@ -1428,6 +1430,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
 
 	mutex_unlock(&hba->wb_mutex);
 	blk_mq_unquiesce_tagset(&hba->host->tag_set);
+	mutex_unlock(&hba->host->scan_mutex);
 
 	ufshcd_release(hba);
 }
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 37887ec68412..18a978452001 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -122,7 +122,9 @@ static const struct {
 };
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+						   unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
 
 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
 {
@@ -506,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 
 	if (ret)
 		return ret;
 
-	if (phy->power_count) {
+	if (phy->power_count)
 		phy_power_off(phy);
-		phy_exit(phy);
-	}
+
 	/* phy initialization - calibrate the phy */
 	ret = phy_init(phy);
@@ -597,13 +598,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
  *
  * @hba: host controller instance
  * @is_pre_scale_up: flag to check if pre scale up condition.
+ * @freq: target opp freq
  * Return: zero for success and non-zero in case of a failure.
  */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
-	unsigned long core_clk_rate = 0;
+	unsigned long clk_freq = 0;
 	u32 core_clk_cycles_per_us;
 
 	/*
@@ -615,22 +617,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
 	if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
 		return 0;
 
+	if (hba->use_pm_opp && freq != ULONG_MAX) {
+		clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+		if (clk_freq)
+			goto cfg_timers;
+	}
+
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
 		if (!strcmp(clki->name, "core_clk")) {
+			if (freq == ULONG_MAX) {
+				clk_freq = clki->max_freq;
+				break;
+			}
+
 			if (is_pre_scale_up)
-				core_clk_rate = clki->max_freq;
+				clk_freq = clki->max_freq;
 			else
-				core_clk_rate = clk_get_rate(clki->clk);
+				clk_freq = clk_get_rate(clki->clk);
 			break;
 		}
 	}
 
+cfg_timers:
 	/* If frequency is smaller than 1MHz, set to 1MHz */
-	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
-		core_clk_rate = DEFAULT_CLK_RATE_HZ;
+	if (clk_freq < DEFAULT_CLK_RATE_HZ)
+		clk_freq = DEFAULT_CLK_RATE_HZ;
 
-	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+	core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
 
 	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
 		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
 		/*
@@ -650,13 +664,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 
 	switch (status) {
 	case PRE_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, false)) {
+		if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			return -EINVAL;
 		}
 
-		err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+		err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
 		if (err)
 			dev_err(hba->dev, "cfg core clk ctrl failed\n");
 		/*
@@ -928,17 +942,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		break;
 	case POST_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, false)) {
-			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
-				__func__);
-			/*
-			 * we return error code at the end of the routine,
-			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
-			 * and bus voting as usual
-			 */
-			ret = -EINVAL;
-		}
-
 		/* cache the power mode parameters to use internally */
 		memcpy(&host->dev_req_params,
 		       dev_req_params, sizeof(*dev_req_params));
@@ -1414,29 +1417,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
 	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
 }
 
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct list_head *head = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
 	u32 cycles_in_1us = 0;
 	u32 core_clk_ctrl_reg;
+	unsigned long clk_freq;
 	int err;
 
+	if (hba->use_pm_opp && freq != ULONG_MAX) {
+		clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+		if (clk_freq) {
+			cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+			goto set_core_clk_ctrl;
+		}
+	}
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk) &&
 		    !strcmp(clki->name, "core_clk_unipro")) {
-			if (!clki->max_freq)
+			if (!clki->max_freq) {
 				cycles_in_1us = 150; /* default for backwards compatibility */
-			else if (freq == ULONG_MAX)
+				break;
+			}
+
+			if (freq == ULONG_MAX) {
 				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
-			else
-				cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+				break;
+			}
+
+			if (is_scale_up)
+				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+			else
+				cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
 			break;
 		}
 	}
 
+set_core_clk_ctrl:
 	err = ufshcd_dme_get(hba,
 			UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &core_clk_ctrl_reg);
@@ -1473,13 +1493,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
 {
 	int ret;
 
-	ret = ufs_qcom_cfg_timers(hba, true);
+	ret = ufs_qcom_cfg_timers(hba, true, freq);
 	if (ret) {
 		dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
 		return ret;
 	}
 	/* set unipro core clock attributes and clear clock divider */
-	return ufs_qcom_set_core_clk_ctrl(hba, freq);
+	return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
 }
 
 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1510,8 +1530,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
 {
+	int ret;
+
+	ret = ufs_qcom_cfg_timers(hba, false, freq);
+	if (ret) {
+		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+		return ret;
+	}
 	/* set unipro core clock attributes and clear clock divider */
-	return ufs_qcom_set_core_clk_ctrl(hba, freq);
+	return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
 }
 
 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -2092,11 +2119,53 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 	return 0;
 }
 
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+						   unsigned long freq, char *name)
+{
+	struct ufs_clk_info *clki;
+	struct dev_pm_opp *opp;
+	unsigned long clk_freq;
+	int idx = 0;
+	bool found = false;
+
+	opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+	if (IS_ERR(opp)) {
+		dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+		return 0;
+	}
+
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, name)) {
+			found = true;
+			break;
+		}
+
+		idx++;
+	}
+
+	if (!found) {
+		dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+		dev_pm_opp_put(opp);
+		return 0;
+	}
+
+	clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+	dev_pm_opp_put(opp);
+
+	return clk_freq;
+}
+
 static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 {
-	u32 gear = 0;
+	u32 gear = UFS_HS_DONT_CHANGE;
+	unsigned long unipro_freq;
 
-	switch (freq) {
+	if (!hba->use_pm_opp)
+		return gear;
+
+	unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+	switch (unipro_freq) {
 	case 403000000:
 		gear = UFS_HS_G5;
 		break;
@@ -2116,10 +2185,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 		break;
 	default:
 		dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
-		break;
+		return UFS_HS_DONT_CHANGE;
 	}
 
-	return gear;
+	return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
 }
 
 /*