Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
 drivers/scsi/ufs/ufshcd.c | 358 +++++++++++++++++++++++++++----------------
 1 file changed, 213 insertions(+), 145 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 13c09dbd99b9..50b12d60dc1b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32,
- UFSHCD_CAN_QUEUE = 32,
+ UFSHCD_NUM_RESERVED = 1,
+ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
+ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
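The reserved slot introduced above pairs with the hba->reserved_slot assignment and the host->can_queue changes later in this patch: one transfer request slot is withheld from the SCSI core and dedicated to device management commands. A worked example of the arithmetic, assuming a controller that advertises 32 transfer request slots:

	/* sketch, assuming hba->nutrs == 32 after ufshcd_hba_capabilities() */
	hba->reserved_slot = hba->nutrs - 1;                   /* tag 31: dev-mgmt only */
	host->can_queue    = hba->nutrs - UFSHCD_NUM_RESERVED; /* tags 0..30 for SCSI */
	host->cmd_per_lun  = hba->nutrs - UFSHCD_NUM_RESERVED;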
@@ -1069,13 +1070,32 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
return false;
}
+/*
+ * Determine the number of pending commands by counting the bits in the SCSI
+ * device budget maps. This approach has been selected because a bit is set in
+ * the budget map before scsi_host_queue_ready() checks the host_self_blocked
+ * flag. The host_self_blocked flag can be modified by calling
+ * scsi_block_requests() or scsi_unblock_requests().
+ */
+static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
+{
+ struct scsi_device *sdev;
+ u32 pending = 0;
+
+ lockdep_assert_held(hba->host->host_lock);
+ __shost_for_each_device(sdev, hba->host)
+ pending += sbitmap_weight(&sdev->budget_map);
+
+ return pending;
+}
+
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
- u32 tr_doorbell;
+ u32 tr_pending;
bool timeout = false, do_last_check = false;
ktime_t start;
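Counting budget-map bits only yields a stable answer once new submissions are blocked: the budget bit is set before scsi_host_queue_ready() tests host_self_blocked, so after scsi_block_requests() a count of zero cannot race with an in-flight submission. A minimal sketch of the quiesce sequence this helper supports, assuming the caller handles -EBUSY on timeout the way the clock scaling path does:

	ufshcd_scsi_block_requests(hba);	/* sets host_self_blocked */
	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
	if (ret)
		ufshcd_scsi_unblock_requests(hba);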
@@ -1093,8 +1113,8 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (!tm_doorbell && !tr_doorbell) {
+ tr_pending = ufshcd_pending_cmds(hba);
+ if (!tm_doorbell && !tr_pending) {
timeout = false;
break;
} else if (do_last_check) {
@@ -1114,12 +1134,12 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
- } while (tm_doorbell || tr_doorbell);
+ } while (tm_doorbell || tr_pending);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
- __func__, tm_doorbell, tr_doorbell);
+ __func__, tm_doorbell, tr_pending);
ret = -EBUSY;
}
out:
@@ -1352,25 +1372,6 @@ out:
return ret;
}
-static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
-{
- int *busy = priv;
-
- WARN_ON_ONCE(reserved);
- (*busy)++;
- return false;
-}
-
-/* Whether or not any tag is in use by a request that is in progress. */
-static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->cmd_queue;
- int busy = 0;
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
- return busy;
-}
-
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
@@ -1666,7 +1667,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
bool flush_result;
unsigned long flags;
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!ufshcd_is_clkgating_allowed(hba) ||
+ !hba->clk_gating.is_initialized)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1769,7 +1771,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
+ || hba->outstanding_reqs || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1826,7 +1828,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks ||
+ hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
hba->active_uic_cmd || hba->uic_async_done ||
hba->clk_gating.state == CLKS_OFF)
return;
@@ -1961,11 +1963,15 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!hba->clk_gating.is_initialized)
return;
+
ufshcd_remove_clk_gating_sysfs(hba);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- destroy_workqueue(hba->clk_gating.clk_gating_workq);
+
+ /* Ungate the clock if necessary. */
+ ufshcd_hold(hba, false);
hba->clk_gating.is_initialized = false;
+ ufshcd_release(hba);
+
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@@ -2189,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+ hba->reserved_slot = hba->nutrs - 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
@@ -2650,17 +2657,42 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
-static inline bool is_rpmb_wlun(struct scsi_device *sdev)
-{
- return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
-}
-
static inline bool is_device_wlun(struct scsi_device *sdev)
{
return sdev->lun ==
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
+/*
+ * Associate the UFS controller queue with the default and poll HCTX types.
+ * Initialize the mq_map[] arrays.
+ */
+static int ufshcd_map_queues(struct Scsi_Host *shost)
+{
+ int i, ret;
+
+ for (i = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ case HCTX_TYPE_POLL:
+ map->nr_queues = 1;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ }
+ map->queue_offset = 0;
+ ret = blk_mq_map_queues(map);
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
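With a single UFS hardware queue, the resulting layout is easy to picture; an illustrative sketch (not code from the patch) of what ufshcd_map_queues() produces:

	/*
	 * map[HCTX_TYPE_DEFAULT]: nr_queues = 1, every CPU -> hw queue 0
	 * map[HCTX_TYPE_READ]:    nr_queues = 0, reads fall back to DEFAULT
	 * map[HCTX_TYPE_POLL]:    nr_queues = 1, every CPU -> hw queue 0
	 */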
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2696,10 +2728,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp;
int err = 0;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
+ /*
+ * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
+ * calls.
+ */
+ rcu_read_lock();
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
@@ -2779,8 +2814,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
ufshcd_send_command(hba, tag);
+
out:
- up_read(&hba->clk_scaling_lock);
+ rcu_read_unlock();
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -2936,30 +2972,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err;
- int tag;
- down_read(&hba->clk_scaling_lock);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- /*
- * Get free slot, sleep if slots are unavailable.
- * Even though we use wait_event() which sleeps indefinitely,
- * the maximum wait time is bounded by SCSI request timeout.
- */
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- /* Set the timeout such that the SCSI error handler is not activated. */
- req->timeout = msecs_to_jiffies(2 * timeout);
- blk_mq_start_request(req);
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -2977,8 +2998,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
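Because the reserved tag is a single shared slot, every caller must serialize on hba->dev_cmd.lock before reaching ufshcd_exec_dev_cmd(); the lockdep assertion above enforces this. A sketch of the expected caller pattern, using the query constants defined elsewhere in this file:

	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
	mutex_unlock(&hba->dev_cmd.lock);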
@@ -4960,11 +4979,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- if (depth > hba->nutrs)
- depth = hba->nutrs;
- return scsi_change_queue_depth(sdev, depth);
+ return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
@@ -5256,6 +5271,18 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
return retval;
}
+/* Release the resources allocated for processing a SCSI command. */
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+
+ scsi_dma_unmap(cmd);
+ lrbp->cmd = NULL; /* Mark the command as completed. */
+ ufshcd_release(hba);
+ ufshcd_clk_scaling_update_busy(hba);
+}
+
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5266,9 +5293,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int result;
int index;
- bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5278,29 +5303,47 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- ufshcd_release(hba);
- update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
- update_scaling = true;
+ ufshcd_clk_scaling_update_busy(hba);
}
}
- if (update_scaling)
- ufshcd_clk_scaling_update_busy(hba);
}
}
+/*
+ * Returns > 0 if one or more commands have been completed or 0 if no
+ * requests have been completed.
+ */
+static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct ufs_hba *hba = shost_priv(shost);
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (completed_reqs)
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+
+ return completed_reqs;
+}
+
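The completion mask in ufshcd_poll() is plain bit arithmetic over the doorbell register; a worked example, assuming three tags outstanding:

	/*
	 * hba->outstanding_reqs = 0b1011   (tags 0, 1 and 3 submitted)
	 * tr_doorbell           = 0b0001   (tag 0 still executing)
	 * completed_reqs = ~0b0001 & 0b1011 = 0b1010  ->  tags 1 and 3 done
	 */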
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5311,9 +5354,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
- unsigned long completed_reqs, flags;
- u32 tr_doorbell;
-
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
* In order to prevent other interrupts starvation the DB is read once
@@ -5328,21 +5368,13 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
if (ufs_fail_completion())
return IRQ_HANDLED;
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
- WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
- "completed: %#lx; outstanding: %#lx\n", completed_reqs,
- hba->outstanding_reqs);
- hba->outstanding_reqs &= ~completed_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ /*
+ * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
+ * do not want polling to trigger spurious interrupt complaints.
+ */
+ ufshcd_poll(hba->host, 0);
- if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
@@ -5986,8 +6018,7 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
}
ufshcd_scsi_block_requests(hba);
/* Drain ufshcd_queuecommand() */
- down_write(&hba->clk_scaling_lock);
- up_write(&hba->clk_scaling_lock);
+ synchronize_rcu();
cancel_work_sync(&hba->eeh_work);
}
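The synchronize_rcu() above pairs with the rcu_read_lock()/rcu_read_unlock() pair added to ufshcd_queuecommand() earlier in this patch: once scsi_block_requests() stops new submissions, synchronize_rcu() waits for every ufshcd_queuecommand() invocation that is already inside its read-side section. The pattern, reduced to its essentials:

	/* submission side */
	rcu_read_lock();
	/* ... build and issue the command ... */
	rcu_read_unlock();

	/* error handler */
	ufshcd_scsi_block_requests(hba);	/* no new readers */
	synchronize_rcu();			/* wait out existing readers */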
@@ -6594,6 +6625,8 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
+ WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
+ task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
@@ -6711,28 +6744,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
- int tag;
u8 upiu_flags;
- down_read(&hba->clk_scaling_lock);
-
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -6801,9 +6822,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7033,6 +7051,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
int err = FAILED;
+ bool outstanding;
u32 reg;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
@@ -7110,7 +7129,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- lrbp->cmd = NULL;
+ /*
+ * Clear the corresponding bit from outstanding_reqs since the command
+ * has been aborted successfully.
+ */
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (outstanding)
+ ufshcd_release_scsi_cmd(hba, lrbp);
+
err = SUCCESS;
release:
@@ -7412,7 +7441,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_boot;
+ struct scsi_device *sdev_boot, *sdev_rpmb;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -7423,14 +7452,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
- hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(hba->sdev_rpmb)) {
- ret = PTR_ERR(hba->sdev_rpmb);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
- scsi_device_put(hba->sdev_rpmb);
+ ufshcd_blk_pm_runtime_init(sdev_rpmb);
+ scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7786,7 +7815,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
peer_pa_tactivate_us = peer_pa_tactivate *
gran_to_us_table[peer_granularity - 1];
- if (pa_tactivate_us > peer_pa_tactivate_us) {
+ if (pa_tactivate_us >= peer_pa_tactivate_us) {
u32 new_peer_pa_tactivate;
new_peer_pa_tactivate = pa_tactivate_us /
@@ -8156,7 +8185,9 @@ static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.proc_name = UFSHCD,
+ .map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
+ .mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
.slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
@@ -8582,7 +8613,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
* @pwr_mode: device power mode to set
*
* Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if failed to set the requested power mode
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -8636,8 +8667,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (ret > 0 && scsi_sense_valid(&sshdr))
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ if (ret > 0) {
+ if (scsi_sense_valid(&sshdr))
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ ret = -EIO;
+ }
}
if (!ret)
@@ -9384,7 +9418,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -9445,6 +9478,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
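Setting nr_maps past HCTX_TYPE_POLL is what makes the poll map in ufshcd_map_queues() reachable. Completions can then be reaped without interrupts when userspace asks for polled I/O; a hypothetical exerciser (the device path is an assumption, and the queue's io_poll sysfs attribute must be enabled):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		static char buf[4096] __attribute__((aligned(4096)));
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		/* hypothetical UFS-backed device; O_DIRECT is required for polling */
		int fd = open("/dev/sda", O_RDONLY | O_DIRECT);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* RWF_HIPRI requests polled completion, serviced by ufshcd_poll() */
		if (preadv2(fd, &iov, 1, 0, RWF_HIPRI) < 0) {
			perror("preadv2");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}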
@@ -9486,6 +9520,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
struct device *dev = hba->dev;
char eh_wq_name[sizeof("ufs_eh_wq_00")];
+ /*
+ * dev_set_drvdata() must be called before any callbacks are registered
+ * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
+ * sysfs).
+ */
+ dev_set_drvdata(dev, hba);
+
if (!mmio_base) {
dev_err(hba->dev,
"Invalid memory reference for mmio_base is NULL\n");
@@ -9528,8 +9569,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Configure LRB */
ufshcd_host_memory_configure(hba);
- host->can_queue = hba->nutrs;
- host->cmd_per_lun = hba->nutrs;
+ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -9597,12 +9638,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
- if (IS_ERR(hba->cmd_queue)) {
- err = PTR_ERR(hba->cmd_queue);
- goto out_remove_scsi_host;
- }
-
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
@@ -9611,7 +9646,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
- goto free_cmd_queue;
+ goto out_remove_scsi_host;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
@@ -9680,8 +9715,6 @@ free_tmf_queue:
blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
-free_cmd_queue:
- blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
out_disable:
@@ -9703,7 +9736,27 @@ void ufshcd_resume_complete(struct device *dev)
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
-int ufshcd_suspend_prepare(struct device *dev)
+static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
+{
+ struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
+ enum ufs_dev_pwr_mode dev_pwr_mode;
+ enum uic_link_state link_state;
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
+ link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
+ res = pm_runtime_suspended(dev) &&
+ hba->curr_dev_pwr_mode == dev_pwr_mode &&
+ hba->uic_link_state == link_state &&
+ !hba->dev_info.b_rpm_dev_flush_capable;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return res;
+}
+
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
@@ -9715,15 +9768,30 @@ int ufshcd_suspend_prepare(struct device *dev)
* Refer ufshcd_resume_complete()
*/
if (hba->sdev_ufs_device) {
- ret = ufshcd_rpm_get_sync(hba);
- if (ret < 0 && ret != -EACCES) {
- ufshcd_rpm_put(hba);
- return ret;
+ /* Prevent runtime suspend */
+ ufshcd_rpm_get_noresume(hba);
+ /*
+ * Check if already runtime suspended in same state as system
+ * suspend would be.
+ */
+ if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
+ /* RPM state is not ok for SPM, so runtime resume */
+ ret = ufshcd_rpm_resume(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
}
hba->complete_put = true;
}
return 0;
}
+EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ return __ufshcd_suspend_prepare(dev, true);
+}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
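The two-argument form is exported so platform glue can opt out of the runtime-PM shortcut when its runtime and system suspend levels differ; a hypothetical vendor hook (name and policy are assumptions, not from the patch):

	static int my_ufs_suspend_prepare(struct device *dev)
	{
		/* pass false: always runtime-resume before system suspend */
		return __ufshcd_suspend_prepare(dev, false);
	}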
#ifdef CONFIG_PM_SLEEP