Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 634
1 file changed, 348 insertions(+), 286 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 708b3b62fc4d..3841ab49f556 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -17,15 +17,18 @@
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_transport.h>
+#include "../scsi_transport_api.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
+#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
+#include "ufshpb.h"
#include <asm/unaligned.h>
-#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -128,15 +131,6 @@ enum {
UFSHCD_CAN_QUEUE = 32,
};
-/* UFSHCD states */
-enum {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_ERROR,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED_FATAL,
- UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
-};
-
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -205,7 +199,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
@@ -242,7 +237,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -253,11 +247,6 @@ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
-{
- return tag >= 0 && tag < hba->nutrs;
-}
-
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
@@ -371,26 +360,24 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- u64 lba = -1;
+ u64 lba;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
+ struct request *rq = scsi_cmd_to_rq(cmd);
int transfer_len = -1;
if (!cmd)
return;
- if (!trace_ufshcd_command_enabled()) {
- /* trace UPIU W/O tracing command */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- return;
- }
-
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ if (!trace_ufshcd_command_enabled())
+ return;
+
opcode = cmd->cmnd[0];
- lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+ lba = scsi_get_lba(cmd);
if (opcode == READ_10 || opcode == WRITE_10) {
/*
@@ -404,7 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
/*
* The number of Bytes to be unmapped beginning with the lba.
*/
- transfer_len = blk_rq_bytes(cmd->request);
+ transfer_len = blk_rq_bytes(rq);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -758,16 +745,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
- * @hba: per adapter instance
- * @tag: position of the bit to be cleared
- */
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
-{
- clear_bit(tag, &hba->outstanding_reqs);
-}
-
-/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -2078,7 +2055,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = lrbp->cmd->request;
+ struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2112,27 +2089,22 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
- ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- if (ufshcd_has_utrlcnr(hba)) {
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- } else {
- unsigned long flags;
- spin_lock_irqsave(hba->host->host_lock, flags);
- set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag,
- REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
/* Make sure that doorbell is committed immediately */
wmb();
}
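
The hunk above is the reason the new hba->outstanding_lock exists: once the bitmap update and the doorbell write are serialized by a single spinlock, the cheaper non-atomic __set_bit() can replace atomic set_bit(). A minimal sketch of the two bitops flavors (illustrative, not driver code):

    unsigned long flags;

    /* Atomic: safe without external locking, but each call is a
     * LOCK-prefixed read-modify-write on x86. */
    set_bit(tag, &hba->outstanding_reqs);

    /* Non-atomic: only safe here because outstanding_lock serializes
     * every writer of outstanding_reqs. */
    spin_lock_irqsave(&hba->outstanding_lock, flags);
    __set_bit(tag, &hba->outstanding_reqs);
    spin_unlock_irqrestore(&hba->outstanding_lock, flags);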
@@ -2247,15 +2219,15 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
}
/**
- * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
* @hba: per adapter instance
* @uic_cmd: UIC command
- *
- * Mutex must be held.
*/
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
WARN_ON(hba->active_uic_cmd);
hba->active_uic_cmd = uic_cmd;
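Replacing the "Mutex must be held" comments with lockdep_assert_held() turns the documented locking contract into a runtime check. A minimal sketch of the pattern, with a hypothetical lock name:

    static DEFINE_MUTEX(example_mutex);    /* hypothetical */

    static void touches_shared_state(void)
    {
        /* Compiles to nothing unless CONFIG_LOCKDEP=y; with lockdep
         * enabled it splats if the caller does not hold the mutex. */
        lockdep_assert_held(&example_mutex);
    }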
@@ -2273,11 +2245,10 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
+ * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Must be called with mutex held.
* Returns 0 only if success.
*/
static int
@@ -2286,6 +2257,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+
if (wait_for_completion_timeout(&uic_cmd->done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
@@ -2315,14 +2288,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
- * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
- * with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
+ lockdep_assert_held(&hba->uic_cmd_mutex);
+ lockdep_assert_held(hba->host->host_lock);
+
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
@@ -2701,20 +2675,12 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
- struct ufs_hba *hba;
- int tag;
int err = 0;
- hba = shost_priv(host);
-
- tag = cmd->request->tag;
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -2748,12 +2714,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
goto out;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out;
}
hba->req_abort_count = 0;
@@ -2766,15 +2726,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- if (hba->pm_op_in_progress)
- set_host_byte(cmd, DID_BAD_TARGET);
- else
- err = SCSI_MLQUEUE_HOST_BUSY;
- ufshcd_release(hba);
- goto out;
- }
-
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
@@ -2784,10 +2735,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
- ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
+ ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
lrbp->req_abort_skip = false;
+ err = ufshpb_prep(hba, lrbp);
+ if (err == -EAGAIN) {
+ lrbp->cmd = NULL;
+ ufshcd_release(hba);
+ goto out;
+ }
+
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
@@ -2796,12 +2754,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_release(hba);
goto out;
}
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
out:
up_read(&hba->clk_scaling_lock);
+
+ if (ufs_trigger_eh())
+ scsi_schedule_eh(hba->host);
+
return err;
}
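
ufs_trigger_eh() comes from the newly included ufs-fault-injection.h and lets testers force the error handler to run on otherwise healthy hardware. A sketch of how such a helper is typically built on the kernel's fault-injection framework (the attribute name below is an assumption, not the actual file contents):

    #include <linux/fault-inject.h>

    static DECLARE_FAULT_ATTR(fail_eh);    /* assumed name */

    bool ufs_trigger_eh(void)
    {
        /* Honors the usual fault-injection knobs (probability,
         * interval, times) configured through debugfs. */
        return should_fail(&fail_eh, 1);
    }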
@@ -2907,8 +2867,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -2930,7 +2888,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* we also need to clear the outstanding_request
* field in hba
*/
- ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
return err;
@@ -2949,11 +2909,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
- struct completion wait;
down_read(&hba->clk_scaling_lock);
@@ -2968,17 +2928,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
/* Set the timeout such that the SCSI error handler is not activated. */
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
-
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -2988,8 +2942,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
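
DECLARE_COMPLETION_ONSTACK() declares and initializes the completion in one step, which is what makes dropping the separate init_completion() call safe. The pattern, reduced to essentials:

    static int wait_for_event_example(unsigned int timeout_ms)
    {
        DECLARE_COMPLETION_ONSTACK(done);  /* no init_completion() needed */

        /* ... publish &done so the completing context can call
         * complete(&done) ... */

        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_ms)))
            return -ETIMEDOUT;
        return 0;
    }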
@@ -3194,7 +3146,7 @@ out_unlock:
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
{
@@ -3419,9 +3371,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
- if (param_offset + param_size > buff_len)
- param_size = buff_len - param_offset;
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+ if (param_offset >= buff_len)
+ ret = -EINVAL;
+ else
+ memcpy(param_read_buf, &desc_buf[param_offset],
+ min_t(u32, param_size, buff_len - param_offset));
}
out:
if (is_kmalloc)
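
A worked example of the new clamp (values assumed for illustration):

    /* buff_len = 64:
     *   param_offset = 60, param_size = 16 -> copies min(16, 64 - 60) = 4 bytes
     *   param_offset = 64 (or more)        -> returns -EINVAL, no memcpy()
     */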
@@ -3965,6 +3919,35 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ lockdep_assert_held(hba->host->host_lock);
+
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+static void ufshcd_schedule_eh(struct ufs_hba *hba)
+{
+ bool schedule_eh = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ schedule_eh = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ scsi_schedule_eh(hba->host);
+}
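
ufshcd_schedule_eh() hands error handling to the SCSI core: scsi_schedule_eh(), declared in drivers/scsi/scsi_transport_api.h (hence the new include above), marks the host as being in recovery and wakes its scsi_eh_<host_no> thread once in-flight commands have completed or failed. That thread then runs the transport template's strategy handler instead of the default SCSI EH logic, which this patch wires up further down:

    static struct scsi_transport_template ufshcd_transport_template = {
        .eh_strategy_handler = ufshcd_err_handler,
    };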
+
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
@@ -3983,14 +3966,14 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
- struct completion uic_async_done;
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
+ bool schedule_eh = false;
u8 status;
int ret;
bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
- init_completion(&uic_async_done);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4055,10 +4038,14 @@ out:
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
- ufshcd_schedule_eh_work(hba);
+ schedule_eh = true;
}
+
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (schedule_eh)
+ ufshcd_schedule_eh(hba);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
@@ -4987,6 +4974,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
return scsi_change_queue_depth(sdev, depth);
}
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_destroy_lu(hba, sdev);
+}
+
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /* skip well-known LU */
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+ return;
+
+ ufshpb_init_hpb_lu(hba, sdev);
+}
+
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
@@ -4996,6 +5003,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
+ ufshcd_hpb_configure(hba, sdev);
+
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
@@ -5020,15 +5029,37 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
+ unsigned long flags;
hba = shost_priv(sdev->host);
+
+ ufshcd_hpb_destroy(hba, sdev);
+
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
- unsigned long flags;
-
spin_lock_irqsave(hba->host->host_lock, flags);
hba->sdev_ufs_device = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ } else if (hba->sdev_ufs_device) {
+ struct device *supplier = NULL;
+
+ /* Ensure UFS Device WLUN exists and does not disappear */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->sdev_ufs_device) {
+ supplier = &hba->sdev_ufs_device->sdev_gendev;
+ get_device(supplier);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (supplier) {
+ /*
+ * If a LUN fails to probe (e.g. absent BOOT WLUN), the
+ * device will not have been registered but can still
+ * have a device link holding a reference to the device.
+ */
+ device_link_remove(&sdev->sdev_gendev, supplier);
+ put_device(supplier);
+ }
}
}
@@ -5124,6 +5155,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
+
+ if (scsi_status == SAM_STAT_GOOD)
+ ufshpb_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5232,10 +5266,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @completed_reqs: requests to complete
+ * @completed_reqs: bitmask that indicates which requests to complete
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+ unsigned long completed_reqs,
+ bool retry_requests)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5244,8 +5280,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- if (!test_and_clear_bit(index, &hba->outstanding_reqs))
- continue;
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
@@ -5253,7 +5287,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
+ result = retry_requests ? DID_BUS_BUSY << 16 :
+ ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
@@ -5277,17 +5312,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
/**
- * ufshcd_trc_handler - handle transfer requests completion
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @use_utrlcnr: get completed requests from UTRLCNR
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ bool retry_requests)
{
- unsigned long completed_reqs = 0;
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5300,27 +5337,21 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- if (use_utrlcnr) {
- u32 utrlcnr;
-
- utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
- if (utrlcnr) {
- ufshcd_writel(hba, utrlcnr,
- REG_UTP_TRANSFER_REQ_LIST_COMPL);
- completed_reqs = utrlcnr;
- }
- } else {
- unsigned long flags;
- u32 tr_doorbell;
+ if (ufs_fail_completion())
+ return IRQ_HANDLED;
- spin_lock_irqsave(hba->host->host_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ __ufshcd_transfer_req_compl(hba, completed_reqs,
+ retry_requests);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
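
The completion mask computed above is simply "issued, but no longer in the doorbell". A worked example with small values (types simplified for illustration):

    unsigned long outstanding = 0x0b; /* tags 0, 1 and 3 were issued */
    unsigned long tr_doorbell = 0x02; /* hardware still owns tag 1 */
    unsigned long completed = ~tr_doorbell & outstanding; /* 0x09: tags 0 and 3 */

Taking outstanding_lock around both the register read and the bitmap update keeps a command issued concurrently (bit set in outstanding_reqs after the doorbell was sampled) from being mistaken for a completion, which is why submission takes the same lock.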
@@ -5799,7 +5830,13 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_trc_handler(hba, false);
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+ ufshcd_tmc_handler(hba);
+}
+
+static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
ufshcd_tmc_handler(hba);
}
@@ -5874,27 +5911,6 @@ out:
return err_handling;
}
-/* host lock must be held before calling this func */
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
-{
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- queue_work(hba->eh_wq, &hba->eh_work);
- }
-}
-
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -6028,11 +6044,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
+ * @host: SCSI host pointer
*/
-static void ufshcd_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct Scsi_Host *host)
{
- struct ufs_hba *hba;
+ struct ufs_hba *hba = shost_priv(host);
unsigned long flags;
bool err_xfer = false;
bool err_tm = false;
@@ -6040,10 +6056,9 @@ static void ufshcd_err_handler(struct work_struct *work)
int tag;
bool needs_reset = false, needs_restore = false;
- hba = container_of(work, struct ufs_hba, eh_work);
-
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->host->host_eh_scheduled = 0;
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6141,8 +6156,7 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- /* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
@@ -6357,7 +6371,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
- ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -6369,6 +6382,10 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
+
+ if (queue_eh_work)
+ ufshcd_schedule_eh(hba);
+
return retval;
}
@@ -6440,7 +6457,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
+ retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
return retval;
}
@@ -6548,9 +6565,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
- /* Make sure descriptors are ready before ringing the task doorbell */
- wmb();
-
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
@@ -6663,11 +6677,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
struct request_queue *q = hba->cmd_queue;
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
- struct completion wait;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6678,14 +6692,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
goto out_unlock;
}
tag = req->tag;
- WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
- init_completion(&wait);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
@@ -6723,8 +6736,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- /* Make sure descriptors are ready before ringing the doorbell */
- wmb();
ufshcd_send_command(hba, tag);
/*
@@ -6865,7 +6876,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, pos);
+ __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
}
}
@@ -6981,33 +6992,24 @@ out:
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
+ struct Scsi_Host *host = cmd->device->host;
+ struct ufs_hba *hba = shost_priv(host);
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
- unsigned int tag;
- int err = 0;
- struct ufshcd_lrb *lrbp;
+ int err = FAILED;
u32 reg;
- host = cmd->device->host;
- hba = shost_priv(host);
- tag = cmd->request->tag;
- lrbp = &hba->lrb[tag];
- if (!ufshcd_valid_tag(hba, tag)) {
- dev_err(hba->dev,
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
- __func__, tag, cmd, cmd->request);
- BUG();
- }
+ WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
ufshcd_hold(hba, false);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return SUCCESS */
+ /* If command is already aborted/completed, return FAILED. */
if (!(test_bit(tag, &hba->outstanding_reqs))) {
dev_err(hba->dev,
"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
__func__, tag, hba->outstanding_reqs, reg);
- goto out;
+ goto release;
}
/* Print Transfer Request of aborted task */
@@ -7036,7 +7038,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- goto cleanup;
+ __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ goto release;
}
/*
@@ -7045,40 +7048,39 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the eh_work and bail.
+ * then queue the error handler and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- set_bit(tag, &hba->outstanding_reqs);
+
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
- goto out;
+
+ ufshcd_schedule_eh(hba);
+
+ goto release;
}
/* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip)
- err = -EIO;
- else
- err = ufshcd_try_to_abort_task(hba, tag);
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skipping abort\n", __func__);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+ goto release;
+ }
- if (!err) {
-cleanup:
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
-out:
- err = SUCCESS;
- } else {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
+ goto release;
}
- /*
- * This ufshcd_release() corresponds to the original scsi cmd that got
- * aborted here (as we won't get any IRQ for it).
- */
+ err = SUCCESS;
+
+release:
+ /* Matches the ufshcd_hold() call at the start of this function. */
ufshcd_release(hba);
return err;
}
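
ufshcd_abort() is the host template's .eh_abort_handler, so it reports SUCCESS or FAILED to the SCSI midlayer; initializing err to FAILED lets the rewritten error paths simply bail out through the new release label. On FAILED, SCSI EH escalates to the device-reset and then host-reset handlers. Abridged from this driver's host template (only the EH callbacks shown, struct name hypothetical):

    static struct scsi_host_template example_sht = {
        /* ... */
        .eh_abort_handler        = ufshcd_abort,  /* SUCCESS or FAILED */
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
    };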
@@ -7101,9 +7103,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshpb_reset_host(hba);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
+ ufshcd_retry_aborted_requests(hba);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -7188,11 +7191,10 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->eh_work);
+ ufshcd_err_handler(hba->host);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -7499,6 +7501,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
+ u8 b_ufs_feature_sup;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -7526,9 +7529,26 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+ if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
+ (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
+ bool hpb_en = false;
+
+ ufshpb_get_dev_info(hba, desc_buf);
+
+ if (!ufshpb_is_legacy(hba))
+ err = ufshcd_query_flag_retry(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_HPB_EN, 0,
+ &hpb_en);
+
+ if (ufshpb_is_legacy(hba) || (!err && hpb_en))
+ dev_info->hpb_enabled = true;
+ }
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -7760,6 +7780,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
+ if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
+ ufshpb_get_geo_info(hba, desc_buf);
+
out:
kfree(desc_buf);
return err;
@@ -7902,6 +7926,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
+ ufshpb_init(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -7909,8 +7934,61 @@ out:
return ret;
}
+static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
+{
+ if (error != BLK_STS_OK)
+ pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
+ kfree(rq->end_io_data);
+ blk_put_request(rq);
+}
+
static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ /*
+ * Some UFS devices clear unit attention condition only if the sense
+ * size used (UFS_SENSE_SIZE in this case) is non-zero.
+ */
+ static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0};
+ struct scsi_request *rq;
+ struct request *req;
+ char *buffer;
+ int ret;
+
+ buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+ /*flags=*/BLK_MQ_REQ_PM);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out_free;
+ }
+
+ ret = blk_rq_map_kern(sdev->request_queue, req,
+ buffer, UFS_SENSE_SIZE, GFP_NOIO);
+ if (ret)
+ goto out_put;
+
+ rq = scsi_req(req);
+ rq->cmd_len = ARRAY_SIZE(cmd);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
+ rq->retries = 3;
+ req->timeout = 1 * HZ;
+ req->rq_flags |= RQF_PM | RQF_QUIET;
+ req->end_io_data = buffer;
+
+ blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true,
+ ufshcd_request_sense_done);
+ return 0;
+
+out_put:
+ blk_put_request(req);
+out_free:
+ kfree(buffer);
+ return ret;
+}
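
blk_execute_rq_nowait() queues the request and returns immediately; completion is delivered through the end_io callback (ufshcd_request_sense_done() above), which runs from completion context and is therefore where the sense buffer and the request reference are released, not in the submitter. For reference, its signature in this kernel generation:

    void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
                               int at_head, rq_end_io_fn *done);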
static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
{
@@ -7938,7 +8016,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
if (ret)
goto out_err;
- ret = ufshcd_send_request_sense(hba, sdp);
+ ret = ufshcd_request_sense_async(hba, sdp);
scsi_device_put(sdp);
out_err:
if (ret)
@@ -7967,13 +8045,13 @@ out:
}
/**
- * ufshcd_probe_hba - probe hba to detect device and initialize
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
- * @async: asynchronous execution or not
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
unsigned long flags;
@@ -8005,7 +8083,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
* Initialize UFS device parameters used by driver, these
* parameters are associated with UFS descriptors.
*/
- if (async) {
+ if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
goto out;
@@ -8050,6 +8128,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ ufshpb_reset(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
@@ -8097,6 +8176,10 @@ out:
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
+#ifdef CONFIG_SCSI_UFS_HPB
+ &ufs_sysfs_hpb_stat_group,
+ &ufs_sysfs_hpb_param_group,
+#endif
NULL,
};
@@ -8521,8 +8604,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
- if (hba->eh_wq)
- destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
@@ -8533,35 +8614,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
}
}
-static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
-{
- unsigned char cmd[6] = {REQUEST_SENSE,
- 0,
- 0,
- 0,
- UFS_SENSE_SIZE,
- 0};
- char *buffer;
- int ret;
-
- buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFS_SENSE_SIZE, NULL, NULL,
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
- if (ret)
- pr_err("%s: failed with err %d\n", __func__, ret);
-
- kfree(buffer);
-out:
- return ret;
-}
-
/**
* ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
* power mode
@@ -8739,6 +8791,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
usleep_range(5000, 5100);
}
+#ifdef CONFIG_PM
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
int ret = 0;
@@ -8766,6 +8819,7 @@ vcc_disable:
out:
return ret;
}
+#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
@@ -8798,6 +8852,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
+ ufshpb_suspend(hba);
+
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -8921,6 +8977,7 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
+ ufshpb_resume(hba);
}
hba->pm_op_in_progress = false;
return ret;
@@ -8999,6 +9056,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+
+ ufshpb_resume(hba);
goto out;
set_old_link_state:
@@ -9168,6 +9227,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
return ret;
}
+#ifdef CONFIG_PM
/**
* ufshcd_resume - helper function for resume operations
* @hba: per adapter instance
@@ -9205,17 +9265,21 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
+#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
/**
- * ufshcd_system_suspend - system suspend routine
- * @hba: per adapter instance
+ * ufshcd_system_suspend - system suspend callback
+ * @dev: Device associated with the UFS controller.
*
- * Check the description of ufshcd_suspend() function for more details.
+ * Executed before putting the system into a sleep state in which the contents
+ * of main memory are preserved.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_system_suspend(struct ufs_hba *hba)
+int ufshcd_system_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret = 0;
ktime_t start = ktime_get();
@@ -9232,16 +9296,19 @@ out:
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
- * ufshcd_system_resume - system resume routine
- * @hba: per adapter instance
+ * ufshcd_system_resume - system resume callback
+ * @dev: Device associated with the UFS controller.
+ *
+ * Executed after waking the system up from a sleep state in which the contents
+ * of main memory were preserved.
*
* Returns 0 for success and non-zero for failure
*/
-
-int ufshcd_system_resume(struct ufs_hba *hba)
+int ufshcd_system_resume(struct device *dev)
{
- int ret = 0;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start = ktime_get();
+ int ret = 0;
if (pm_runtime_suspended(hba->dev))
goto out;
@@ -9256,17 +9323,20 @@ out:
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM
/**
- * ufshcd_runtime_suspend - runtime suspend routine
- * @hba: per adapter instance
+ * ufshcd_runtime_suspend - runtime suspend callback
+ * @dev: Device associated with the UFS controller.
*
* Check the description of ufshcd_suspend() function for more details.
*
* Returns 0 for success and non-zero for failure
*/
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
+int ufshcd_runtime_suspend(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9281,7 +9351,7 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
* ufshcd_runtime_resume - runtime resume routine
- * @hba: per adapter instance
+ * @dev: Device associated with the UFS controller.
*
* This function basically brings controller
* to active state. Following operations are done in this function:
@@ -9289,8 +9359,9 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
*/
-int ufshcd_runtime_resume(struct ufs_hba *hba)
+int ufshcd_runtime_resume(struct device *dev)
{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
@@ -9302,12 +9373,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba)
return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
-
-int ufshcd_runtime_idle(struct ufs_hba *hba)
-{
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_runtime_idle);
+#endif /* CONFIG_PM */
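
Taking a struct device * instead of a struct ufs_hba * lets UFS glue drivers plug these routines straight into dev_pm_ops, and a NULL .runtime_idle callback behaves like the empty stub deleted above, which is why ufshcd_runtime_idle() could go. A hypothetical wiring:

    static const struct dev_pm_ops ufs_example_pm_ops = {  /* hypothetical */
        SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
        SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume,
                           NULL /* runtime_idle */)
    };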
/**
* ufshcd_shutdown - shutdown routine
@@ -9343,6 +9409,7 @@ void ufshcd_remove(struct ufs_hba *hba)
if (hba->sdev_ufs_device)
ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
+ ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -9381,6 +9448,10 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
+static struct scsi_transport_template ufshcd_transport_template = {
+ .eh_strategy_handler = ufshcd_err_handler,
+};
+
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
@@ -9407,13 +9478,15 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->transportt = &ufshcd_transport_template;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
- *hba_handle = hba;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
-
INIT_LIST_HEAD(&hba->clk_list_head);
+ spin_lock_init(&hba->outstanding_lock);
+
+ *hba_handle = hba;
out_error:
return err;
@@ -9444,7 +9517,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -9498,17 +9570,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
- if (!hba->eh_wq) {
- dev_err(hba->dev, "%s: failed to create eh workqueue\n",
- __func__);
- err = -ENOMEM;
- goto out_disable;
- }
- INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
@@ -9627,6 +9688,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
+ device_enable_async_suspend(dev);
return 0;
free_tmf_queue: