Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r--  drivers/scsi/ufs/ufshcd.c  667
1 file changed, 367 insertions(+), 300 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9902b7e3aa4a..721f55db181f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -20,6 +20,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
@@ -94,6 +95,8 @@
16, 4, buf, __len, false); \
} while (0)
+static bool early_suspend;
+
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
@@ -225,6 +228,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -243,11 +247,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -288,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
__func__, ret);
- ufshcd_wb_toggle_flush(hba, true);
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+ ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -304,53 +306,67 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ UFS_TSF_CDB);
}
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
+ struct utp_upiu_req *rq_rsp)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ if (!trace_ufshcd_upiu_enabled())
+ return;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
int off = (int)tag - hba->nutrs;
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
- &descp->input_param1);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ if (str_t == UFS_TM_SEND)
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
+ &descp->input_param1, UFS_TSF_TM_INPUT);
+ else
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
+ &descp->output_param1, UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
struct uic_command *ucmd,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
u32 cmd;
if (!trace_ufshcd_uic_command_enabled())
return;
- if (!strcmp(str, "send"))
+ if (str_t == UFS_CMD_SEND)
cmd = ucmd->command;
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
+ trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
+static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ enum ufs_trace_str_t str_t)
{
sector_t lba = -1;
u8 opcode = 0, group_id = 0;
@@ -362,13 +378,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
if (cmd) { /* data phase exists */
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -391,7 +407,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
doorbell, transfer_len, intr, lba, opcode, group_id);
}
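
[Note: the string arguments replaced throughout these trace helpers ("send", "complete", "query_send", ...) become members of enum ufs_trace_str_t. A sketch of the enum these hunks assume, with values inferred from the call sites in this diff; the authoritative definition ships with the UFS trace header, alongside the UFS_TSF_* trace-specific-field selectors used above.]

/* Sketch only: member names taken from the call sites in this diff. */
enum ufs_trace_str_t {
	UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
	UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
	UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
};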
@@ -580,6 +596,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_vops_device_reset(hba);
+
+ if (!err) {
+ ufshcd_set_ufs_dev_active(hba);
+ if (ufshcd_is_wb_allowed(hba)) {
+ hba->dev_info.wb_enabled = false;
+ hba->dev_info.wb_buf_flush_enabled = false;
+ }
+ }
+ if (err != -EOPNOTSUPP)
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
@@ -1163,19 +1196,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
*/
ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ goto out;
}
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
- up_write(&hba->clk_scaling_lock);
+ if (writelock)
+ up_write(&hba->clk_scaling_lock);
+ else
+ up_read(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
}
/**
@@ -1190,13 +1234,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
-
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- goto out;
+ return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
@@ -1222,14 +1264,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- up_write(&hba->clk_scaling_lock);
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
ufshcd_wb_ctrl(hba, scale_up);
- down_write(&hba->clk_scaling_lock);
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba);
-out:
- ufshcd_release(hba);
+ ufshcd_clock_scaling_unprepare(hba, is_writelock);
return ret;
}
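
[Note: the WriteBooster toggle above runs under a read hold obtained by downgrading the write lock, which is why ufshcd_clock_scaling_unprepare() must now be told which kind of hold it is releasing. A minimal sketch of the locking shape, using the generic rwsem API; this is illustration, not patch code.]

/* Sketch of the rwsem downgrade used above: the gear/clock change needs
 * the lock exclusively, but ufshcd_wb_ctrl() only needs scaling state to
 * stay stable, so the exclusive hold is converted to a shared one.
 */
down_write(&hba->clk_scaling_lock);      /* exclusive for the scale itself */
/* ... change gear and clock frequency ... */
downgrade_write(&hba->clk_scaling_lock); /* atomically become a reader */
/* ... ufshcd_wb_ctrl(hba, scale_up): may issue device commands ... */
up_read(&hba->clk_scaling_lock);         /* matches the downgraded hold */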
@@ -1310,15 +1350,8 @@ static int ufshcd_devfreq_target(struct device *dev,
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- pm_runtime_get_noresume(hba->dev);
- if (!pm_runtime_active(hba->dev)) {
- pm_runtime_put_noidle(hba->dev);
- ret = -EAGAIN;
- goto out;
- }
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
- pm_runtime_put(hba->dev);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -1465,8 +1498,8 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool suspend = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.is_suspended) {
@@ -1484,9 +1517,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool resume = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_scaling.is_suspended) {
resume = true;
@@ -1503,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1511,22 +1541,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
- int err;
+ int err = 0;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ err = -EBUSY;
+ goto out;
+ }
+
value = !!value;
- if (value == hba->clk_scaling.is_allowed)
+ if (value == hba->clk_scaling.is_enabled)
goto out;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
-
- hba->clk_scaling.is_allowed = value;
+ hba->clk_scaling.is_enabled = value;
if (value) {
ufshcd_resume_clkscaling(hba);
@@ -1541,10 +1574,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
out:
- return count;
+ up(&hba->host_sem);
+ return err ? err : count;
}
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
@@ -1555,6 +1589,45 @@ static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
+{
+ if (hba->clk_scaling.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+}
+
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.min_gear)
+ hba->clk_scaling.min_gear = UFS_HS_G1;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ hba->clk_scaling.is_initialized = true;
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ ufshcd_remove_clk_scaling_sysfs(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ hba->clk_scaling.is_initialized = false;
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1846,35 +1919,31 @@ out:
return count;
}
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->clk_scaling.min_gear)
- hba->clk_scaling.min_gear = UFS_HS_G1;
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
- ufshcd_clkscaling_init_sysfs(hba);
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
+ if (hba->clk_gating.delay_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ if (hba->clk_gating.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
@@ -1895,34 +1964,21 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
WQ_MEM_RECLAIM | WQ_HIGHPRI);
- hba->clk_gating.is_enabled = true;
-
- hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
- hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
- sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
- hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+ ufshcd_init_clk_gating_sysfs(hba);
- hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
- hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
- sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
- hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
- hba->clk_gating.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+ hba->clk_gating.is_enabled = true;
+ hba->clk_gating.is_initialized = true;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!hba->clk_gating.is_initialized)
return;
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_remove_clk_gating_sysfs(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
+ hba->clk_gating.is_initialized = false;
}
/* Must be called with host lock acquired */
@@ -1937,7 +1993,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
if (queue_resume_work)
@@ -1983,7 +2039,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
- ufshcd_add_command_trace(hba, task_tag, "send");
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2119,7 +2175,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
- ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
+ ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
/* Write UIC Cmd */
ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
@@ -2838,7 +2894,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
- ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2848,8 +2904,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out:
- ufshcd_add_query_upiu_trace(hba, tag,
- err ? "query_complete_err" : "query_complete");
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out_put_tag:
blk_put_request(req);
@@ -3406,7 +3462,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -3665,7 +3721,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -3964,7 +4020,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
@@ -3977,6 +4033,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
+ else
+ ufshcd_clear_ua_wluns(hba);
return ret;
}
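
[Note: ufshcd_clear_ua_wluns() is only forward-declared in this file's hunks. After a reset the device raises a Unit Attention on its well-known LUs, which would fail the next power-mode request. A hedged sketch of what such a helper does; the per-WLUN helper name here is hypothetical.]

/* Sketch, not the patch's implementation: clear the post-reset Unit
 * Attention on the device and RPMB well-known LUs (e.g. via REQUEST
 * SENSE) so subsequent power-mode commands are not rejected.
 * ufshcd_clear_ua_wlun() is a hypothetical per-WLUN helper.
 */
static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
	if (!ret)
		ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
	return ret;
}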
@@ -4197,25 +4255,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
- DL_AFC0ReqTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
- DL_FC1ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
- DL_TC1ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
- DL_AFC1ReqTimeOutVal_Default);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
- DL_AFC0ReqTimeOutVal_Default);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
@@ -4522,6 +4582,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
e->tstamp[e->pos] = ktime_get();
+ e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
ufshcd_vops_event_notify(hba, id, &val);
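
[Note: the new e->cnt preserves the total number of occurrences even after the history ring wraps. A sketch of the history record this hunk assumes, with field names taken from the code above; the exact struct lives in the driver header, so treat the layout as illustrative.]

/* Sketch inferred from the hunk above: a small per-event-type ring
 * buffer; pos wraps at UFS_EVENT_HIST_LENGTH, cnt keeps counting.
 */
struct ufs_event_hist {
	int pos;                               /* next slot to overwrite */
	u32 val[UFS_EVENT_HIST_LENGTH];        /* recent event values */
	ktime_t tstamp[UFS_EVENT_HIST_LENGTH]; /* when each was recorded */
	unsigned long long cnt;                /* total since boot */
};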
@@ -4806,6 +4867,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
@@ -4851,9 +4914,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
ufshcd_copy_sense_data(lrbp);
fallthrough;
case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- scsi_status;
+ result |= DID_OK << 16 | scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
@@ -4973,7 +5034,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+ if ((host_byte(result) != DID_OK) &&
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -5010,7 +5072,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- "complete");
+ UFS_CMD_COMP);
return retval;
}
@@ -5034,7 +5096,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
- ufshcd_add_command_trace(hba, index, "complete");
+ ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
@@ -5048,7 +5110,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
- "dev_complete");
+ UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
update_scaling = true;
}
@@ -5368,7 +5430,7 @@ out:
__func__, err);
}
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
@@ -5377,7 +5439,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
if (!ufshcd_is_wb_allowed(hba))
return 0;
- if (!(enable ^ hba->wb_enabled))
+ if (!(enable ^ hba->dev_info.wb_enabled))
return 0;
if (enable)
opcode = UPIU_QUERY_OPCODE_SET_FLAG;
@@ -5393,7 +5455,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
return ret;
}
- hba->wb_enabled = enable;
+ hba->dev_info.wb_enabled = enable;
dev_dbg(hba->dev, "%s write booster %s %d\n",
__func__, enable ? "enable" : "disable", ret);
@@ -5416,61 +5478,37 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
index, NULL);
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
-{
- if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
- return;
-
- if (enable)
- ufshcd_wb_buf_flush_enable(hba);
- else
- ufshcd_wb_buf_flush_disable(hba);
-
-}
-
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
+ enum query_opcode opcode;
- if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_buf_flush_enabled == enable)
return 0;
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
- if (ret)
- dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
- __func__, ret);
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
else
- hba->wb_buf_flush_enabled = true;
-
- dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
- return ret;
-}
-
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
-{
- int ret;
- u8 index;
-
- if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
- return 0;
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
+ NULL);
if (ret) {
- dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
- __func__, ret);
- } else {
- hba->wb_buf_flush_enabled = false;
- dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
+ enable ? "enable" : "disable", ret);
+ goto out;
}
+ hba->dev_info.wb_buf_flush_enabled = enable;
+
+ dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
+out:
return ret;
+
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5695,6 +5733,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+ if (suspend) {
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_clk_scaling_allow(hba, false);
+ } else {
+ ufshcd_clk_scaling_allow(hba, true);
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_resume_clkscaling(hba);
+ }
+}
+
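
[Note: after this patch, clock scaling is governed by three independent conditions: hardware capability, the user's sysfs policy (is_enabled), and a runtime gate (is_allowed) that error handling and suspend clear under the write lock. A hypothetical predicate combining them, purely for illustration.]

/* Sketch only -- ufshcd_can_scale_clocks() is a hypothetical helper
 * combining the three conditions this patch separates: capability
 * (fixed at probe), user policy (sysfs clkscale_enable), and the
 * runtime gate toggled by ufshcd_clk_scaling_allow().
 */
static bool ufshcd_can_scale_clocks(struct ufs_hba *hba)
{
	return ufshcd_is_clkscaling_supported(hba) &&
	       hba->clk_scaling.is_enabled &&
	       hba->clk_scaling.is_allowed;
}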
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
@@ -5719,27 +5777,27 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba, false);
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
- }
+ ufshcd_clk_scaling_allow(hba, false);
}
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_release(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
pm_runtime_put(hba->dev);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ return (!hba->is_powered || hba->shutting_down ||
+ hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
- ufshcd_is_link_broken(hba))));
+ ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
@@ -5809,13 +5867,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
- down(&hba->eh_sem);
+ down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -5984,7 +6042,10 @@ skip_err_handling:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
+
+ if (!err && needs_reset)
+ ufshcd_clear_ua_wluns(hba);
}
/**
@@ -6271,17 +6332,20 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
if (enabled_intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
- if (enabled_intr_status && retval == IRQ_NONE) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
- __func__, intr_status);
+ if (enabled_intr_status && retval == IRQ_NONE &&
+ !ufshcd_eh_in_progress(hba)) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+ __func__,
+ intr_status,
+ hba->ufs_stats.last_intr_status,
+ enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6325,7 +6389,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
req->end_io_data = &wait;
free_slot = req->tag;
WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6351,7 +6418,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(host->host_lock, flags);
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
err = wait_for_completion_io_timeout(&wait,
@@ -6362,7 +6429,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* use-after-free.
*/
req->end_io_data = NULL;
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -6373,7 +6440,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
err = 0;
memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
}
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6643,19 +6710,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -6664,7 +6728,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
@@ -6925,13 +6989,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
- if (err)
- goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba, false);
+ if (!err)
+ err = ufshcd_probe_hba(hba, false);
-out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -6968,7 +7030,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
do {
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -7224,6 +7286,7 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
u32 d_lu_wb_buf_alloc;
+ u32 ext_ufs_feature;
if (!ufshcd_is_wb_allowed(hba))
return;
@@ -7241,30 +7304,25 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
goto wb_disabled;
- dev_info->d_ext_ufs_feature_sup =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
goto wb_disabled;
/*
- * WB may be supported but not configured while provisioning.
- * The spec says, in dedicated wb buffer mode,
- * a max of 1 lun would have wb buffer configured.
- * Now only shared buffer mode is supported.
+ * WB may be supported but not configured while provisioning. The spec
+ * says, in dedicated wb buffer mode, a max of 1 lun would have wb
+ * buffer configured.
*/
- dev_info->b_wb_buffer_type =
- desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
- if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
- dev_info->d_wb_alloc_units =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
- if (!dev_info->d_wb_alloc_units)
+ if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
+ if (!get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
goto wb_disabled;
} else {
for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
@@ -7702,19 +7760,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
if (ret)
goto out;
+ ufshcd_clear_ua_wluns(hba);
+
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info.info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
-
hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
}
ufs_bsg_probe(hba);
@@ -7885,10 +7946,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
if (ret)
goto out;
@@ -7901,10 +7962,7 @@ out:
*/
if (ret) {
pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
- } else {
- ufshcd_clear_ua_wluns(hba);
}
}
@@ -8045,7 +8103,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
@@ -8315,6 +8373,8 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_vreg;
+ ufs_debugfs_hba_init(hba);
+
hba->is_powered = true;
goto out;
@@ -8331,12 +8391,13 @@ out:
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_exit_clk_gating(hba);
+ if (hba->eh_wq)
+ destroy_workqueue(hba->eh_wq);
+ ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
- ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- if (hba->devfreq)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -8414,13 +8475,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
+ ufshcd_clear_ua_wluns(hba);
cmd[4] = pwr_mode << 4;
@@ -8441,7 +8496,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
if (!ret)
hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
@@ -8637,11 +8692,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
- ufshcd_suspend_clkscaling(hba);
- }
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, true);
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8685,6 +8737,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_wb_need_flush(hba));
}
+ flush_work(&hba->eeh_work);
+
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
@@ -8697,8 +8751,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
}
- flush_work(&hba->eeh_work);
-
/*
* In the case of DeepSleep, the device is expected to remain powered
* with the link off, so do not check for bkops.
@@ -8708,8 +8760,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
- ufshcd_vreg_set_lpm(hba);
-
disable_clks:
/*
* Call vendor specific suspend callback. As these callbacks may access
@@ -8733,13 +8783,13 @@ disable_clks:
hba->clk_gating.state);
}
+ ufshcd_vreg_set_lpm(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
@@ -8747,7 +8797,7 @@ set_link_active:
* further below.
*/
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
WARN_ON(!ufshcd_is_link_off(hba));
}
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,16 +8807,18 @@ set_link_active:
set_dev_active:
/* Can also get here needing to exit DeepSleep */
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_host_reset_and_restore(hba);
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
+
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
+ ufshcd_clear_ua_wluns(hba);
ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8800,18 +8852,18 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
old_link_state = hba->uic_link_state;
ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
/* Make sure clocks are enabled before accessing controller */
ret = ufshcd_setup_clocks(hba, true);
if (ret)
- goto out;
+ goto disable_vreg;
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
-
/*
* Call vendor specific resume callback. As these callbacks may access
* vendor specific host controller register space call them when the
@@ -8819,7 +8871,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_vreg;
+ goto disable_irq_and_vops_clks;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8866,8 +8918,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = false;
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -8877,6 +8929,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
+ ufshcd_clear_ua_wluns(hba);
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8886,18 +8940,16 @@ set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
out:
hba->pm_op_in_progress = 0;
if (ret)
@@ -8918,14 +8970,21 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->eh_sem);
- if (!hba || !hba->is_powered)
+ if (!hba) {
+ early_suspend = true;
+ return 0;
+ }
+
+ down(&hba->host_sem);
+
+ if (!hba->is_powered)
return 0;
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state))
+ hba->uic_link_state) &&
+ !hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {
@@ -8950,7 +9009,7 @@ out:
if (!ret)
hba->is_sys_suspended = true;
else
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8967,9 +9026,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba) {
- up(&hba->eh_sem);
+ if (!hba)
return -EINVAL;
+
+ if (unlikely(early_suspend)) {
+ early_suspend = false;
+ down(&hba->host_sem);
}
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -8986,7 +9048,7 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9078,7 +9140,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
if (!hba->is_powered)
goto out;
@@ -9092,7 +9157,6 @@ out:
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
- up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9111,15 +9175,9 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
- destroy_workqueue(hba->eh_wq);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
-
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -9130,7 +9188,6 @@ EXPORT_SYMBOL_GPL(ufshcd_remove);
*/
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
- ufshcd_crypto_destroy_keyslot_manager(hba);
scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
@@ -9288,7 +9345,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
- sema_init(&hba->eh_sem, 1);
+ sema_init(&hba->host_sem, 1);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -9320,7 +9377,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
- goto exit_gating;
+ goto out_disable;
} else {
hba->is_irq_enabled = true;
}
@@ -9328,7 +9385,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
- goto exit_gating;
+ goto out_disable;
}
hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
@@ -9353,7 +9410,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
@@ -9411,10 +9468,6 @@ free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
-exit_gating:
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- destroy_workqueue(hba->eh_wq);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
@@ -9423,6 +9476,20 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+static int __init ufshcd_core_init(void)
+{
+ ufs_debugfs_init();
+ return 0;
+}
+
+static void __exit ufshcd_core_exit(void)
+{
+ ufs_debugfs_exit();
+}
+
+module_init(ufshcd_core_init);
+module_exit(ufshcd_core_exit);
+
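
[Note: ufs_debugfs_init()/ufs_debugfs_exit() come from the new ufs-debugfs.h included at the top of this patch. A minimal sketch of what such module hooks typically contain; the directory name and layout are assumptions, not taken from this diff.]

/* Sketch of plausible ufs-debugfs.c module hooks; the real file is
 * added elsewhere in this series. The directory name is an assumption.
 */
#include <linux/debugfs.h>

static struct dentry *ufs_debugfs_root;

void ufs_debugfs_init(void)
{
	ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}

void ufs_debugfs_exit(void)
{
	debugfs_remove_recursive(ufs_debugfs_root);
}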
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");