Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c   | 113
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c  | 206
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.h  |  32
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c     |  22
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.h     |   4
-rw-r--r--  drivers/scsi/ufs/ufs.h           |  31
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c       |   2
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h    |   9
-rw-r--r--  drivers/scsi/ufs/ufshcd.c        | 715
-rw-r--r--  drivers/scsi/ufs/ufshcd.h        |  34
-rw-r--r--  drivers/scsi/ufs/unipro.h        |  11
11 files changed, 790 insertions(+), 389 deletions(-)
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index b2af04c57a39..56a6a1ed5ec2 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -19,6 +19,85 @@
#define CDNS_UFS_REG_HCLKDIV 0xFC
#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+ /**
+ * cdns_ufs_dme_attr_val - for storing L4 attributes
+ */
+ u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ &host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ &host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ &host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ &host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ &host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ &host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ &host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ &host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+ &host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+ struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+ host->cdns_ufs_dme_attr_val[0]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+ host->cdns_ufs_dme_attr_val[1]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+ host->cdns_ufs_dme_attr_val[2]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+ host->cdns_ufs_dme_attr_val[3]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+ host->cdns_ufs_dme_attr_val[4]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[5]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+ host->cdns_ufs_dme_attr_val[6]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[7]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+ host->cdns_ufs_dme_attr_val[8]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+ host->cdns_ufs_dme_attr_val[9]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+ host->cdns_ufs_dme_attr_val[10]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+ host->cdns_ufs_dme_attr_val[11]);
+}
/**
* Sets HCLKDIV register value based on the core_clk
@@ -78,6 +157,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
+ * cdns_ufs_hibern8_notify - save/restore L4 attributes around hibern8 enter/exit
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+ cdns_ufs_get_l4_attr(hba);
+ if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+ cdns_ufs_set_l4_attr(hba);
+}
+
+/**
* Called before and after Link startup is carried out.
* @hba: host controller instance
* @status: notify stage (pre, post change)
@@ -99,6 +194,12 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
*/
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+ /*
+ * Disable the Auto-Hibernate feature in the Cadence UFS controller
+ * to mask unexpected interrupt triggers.
+ */
+ hba->ahit = 0;
+
return 0;
}
@@ -111,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
static int cdns_ufs_init(struct ufs_hba *hba)
{
int status = 0;
+ struct cdns_ufs_host *host;
+ struct device *dev = hba->dev;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
if (hba->vops && hba->vops->phy_initialization)
status = hba->vops->phy_initialization(hba);
@@ -138,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
+ .init = cdns_ufs_init,
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
@@ -148,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+ .hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct of_device_id cdns_ufs_of_match[] = {
@@ -213,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
static struct platform_driver cdns_ufs_pltfrm_driver = {
.probe = cdns_ufs_pltfrm_probe,
.remove = cdns_ufs_pltfrm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
.driver = {
.name = "cdns-ufshcd",
.pm = &cdns_ufs_dev_pm_ops,
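
Note: the twelve ufshcd_dme_get()/ufshcd_dme_set() pairs above save the local
L4 (transport) attributes before hibern8 entry and restore them after exit,
since this controller does not preserve them across hibern8. A condensed,
table-driven sketch of the same save pattern; the array and loop are
illustrative only, the patch itself open-codes each attribute:

	/* Illustrative sketch only; not the patch's actual shape. */
	static const u32 cdns_ufs_l4_attrs[CDNS_UFS_MAX_L4_ATTRS] = {
		T_PEERDEVICEID, T_PEERCPORTID, T_TRAFFICCLASS,
		T_PROTOCOLID, T_CPORTFLAGS, T_TXTOKENVALUE,
		T_RXTOKENVALUE, T_LOCALBUFFERSPACE, T_PEERBUFFERSPACE,
		T_CREDITSTOSEND, T_CPORTMODE, T_CONNECTIONSTATE,
	};

	static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
	{
		struct cdns_ufs_host *host = ufshcd_get_variant(hba);
		int i;

		/* Snapshot every L4 attribute into the variant data. */
		for (i = 0; i < CDNS_UFS_MAX_L4_ATTRS; i++)
			ufshcd_dme_get(hba, UIC_ARG_MIB(cdns_ufs_l4_attrs[i]),
				       &host->cdns_ufs_dme_attr_val[i]);
	}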
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 83e28edc3ac5..53eae5fe2ade 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -6,16 +6,30 @@
* Peter Wang <peter.wang@mediatek.com>
*/
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
+#define ufs_mtk_smc(cmd, val, res) \
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
+ cmd, val, 0, 0, 0, 0, 0, &(res))
+
+#define ufs_mtk_ref_clk_notify(on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
return err;
}
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct arm_smccc_res res;
+ unsigned long timeout;
+ u32 value;
+
+ if (host->ref_clk_enabled == on)
+ return 0;
+
+ if (on) {
+ ufs_mtk_ref_clk_notify(on, res);
+ ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+ } else {
+ ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+ }
+
+ /* Wait for ack */
+ timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
+ do {
+ value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+ /* Wait until the ack bit equals the req bit */
+ if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+ goto out;
+
+ usleep_range(100, 200);
+ } while (time_before(jiffies, timeout));
+
+ dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+
+ return -ETIMEDOUT;
+
+out:
+ host->ref_clk_enabled = on;
+ if (!on)
+ ufs_mtk_ref_clk_notify(on, res);
+
+ return 0;
+}
+
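
Note: the ack-wait loop above polls until the ack bit (bit 1) mirrors the
request bit (bit 0). The same handshake could be phrased with
readl_poll_timeout() from <linux/iopoll.h>; a sketch only, under the
assumption that the REFCLK control register is accessed through a plain
ioremapped pointer rather than ufshcd_readl():

	#include <linux/iopoll.h>

	/* Sketch: sample every 100 us within the REFCLK_REQ_TIMEOUT_MS
	 * budget until the ack bit mirrors the req bit.
	 */
	static int ufs_mtk_wait_refclk_ack(void __iomem *refclk_ctrl)
	{
		u32 value;

		return readl_poll_timeout(refclk_ctrl, value,
					  ((value & REFCLK_ACK) >> 1) ==
					  (value & REFCLK_REQUEST),
					  100, REFCLK_REQ_TIMEOUT_MS * 1000);
	}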
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -105,12 +162,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (!on)
+ if (!on) {
+ ufs_mtk_setup_ref_clk(hba, on);
ret = phy_power_off(host->mphy);
+ }
break;
case POST_CHANGE:
- if (on)
+ if (on) {
ret = phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
+ }
break;
}
@@ -150,6 +211,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+ /* Enable clock-gating */
+ hba->caps |= UFSHCD_CAP_CLK_GATING;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -238,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
return ret;
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms;
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+ ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ else
+ ah_ms = 10;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = ah_ms + 5;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
/* disable device LCC */
@@ -246,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
+ /* configure auto-hibern8 timer to 10ms */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ufshcd_auto_hibern8_update(hba,
+ FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+
return 0;
}
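
Note: in the AHIT register the scale field selects the timer unit (scale 3 is
1 ms per UFSHCI), so the FIELD_PREP() pair above encodes a 10 ms idle timer,
and ufs_mtk_setup_clk_gating() recovers the same value with FIELD_GET() to
derive a 15 ms clock-gating delay. A minimal worked sketch; the function name
is hypothetical:

	#include <linux/bitfield.h>

	static u32 ufs_mtk_example_gating_delay(void)
	{
		/* Encode a 10 ms auto-hibern8 idle timer (scale 3 = 1 ms). */
		u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
		/* Decode it the way ufs_mtk_setup_clk_gating() does. */
		u32 ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit); /* 10 */

		return ah_ms + 5;	/* 15 ms gating delay */
	}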
@@ -269,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
return ret;
}
+static void ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_reset_ctrl(0, res);
+
+ /*
+ * The reset signal is active low. UFS devices shall detect a
+ * positive or negative RST_n pulse width of 1 us or more.
+ *
+ * To be on the safe side, keep the reset low for at least 10 us.
+ */
+ usleep_range(10, 15);
+
+ ufs_mtk_device_reset_ctrl(1, res);
+
+ /* Some devices may need time to respond to rst_n */
+ usleep_range(10000, 15000);
+
+ dev_info(hba->dev, "device reset done\n");
+}
+
+static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_hba_enable(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ if (err)
+ return err;
+
+ err = ufshcd_uic_hibern8_exit(hba);
+ if (!err)
+ ufshcd_set_link_active(hba);
+ else
+ return err;
+
+ err = ufshcd_make_hba_operational(hba);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 1);
+ if (err) {
+ /* Resume UniPro state for following error recovery */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ 0);
+ return err;
+ }
+
+ return 0;
+}
+
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
+ int err;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ err = ufs_mtk_link_set_lpm(hba);
+ if (err)
+ return -EAGAIN;
phy_power_off(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, false);
+ }
return 0;
}
@@ -282,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int err;
- if (ufshcd_is_link_hibern8(hba))
+ if (ufshcd_is_link_hibern8(hba)) {
+ ufs_mtk_setup_ref_clk(hba, true);
phy_power_on(host->mphy);
+ err = ufs_mtk_link_set_hpm(hba);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+
+ ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+
+ ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
+ REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
+ "MPHY Ctrl ");
+
+ /* Direct debugging information to REG_UFS_PROBE */
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
+}
+
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
return 0;
}
@@ -301,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.setup_clocks = ufs_mtk_setup_clocks,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
+ .dbg_register_dump = ufs_mtk_dbg_register_dump,
+ .device_reset = ufs_mtk_device_reset,
};
/**
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 19f8c42fe06f..fccdd979d6fb 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -6,6 +6,30 @@
#ifndef _UFS_MEDIATEK_H
#define _UFS_MEDIATEK_H
+#include <linux/bitops.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_EXTREG 0x2100
+#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_REJECT_MON 0x22AC
+#define REG_UFS_DEBUG_SEL 0x22C0
+#define REG_UFS_PROBE 0x22C8
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE 0x0
+#define REFCLK_REQUEST BIT(0)
+#define REFCLK_ACK BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_MS 3
+
/*
* Vendor specific pre-defined parameters
*/
@@ -30,6 +54,13 @@
#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
/*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+
+/*
* VS_DEBUGCLOCKENABLE
*/
enum {
@@ -48,6 +79,7 @@ enum {
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ bool ref_clk_enabled;
};
#endif /* !_UFS_MEDIATEK_H */
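
Note: the ufs_mtk_smc() wrapper in ufs-mediatek.c turns these command IDs
into a SiP firmware call. A sketch of what the device-reset assert looks like
written out by hand, wrapped in a hypothetical helper:

	#include <linux/arm-smccc.h>

	static void ufs_mtk_example_assert_reset(void)
	{
		struct arm_smccc_res res;

		/* Equivalent of ufs_mtk_device_reset_ctrl(0, res):
		 * ask the SiP firmware to drive the active-low RST_n low.
		 */
		arm_smccc_smc(MTK_SIP_UFS_CONTROL, UFS_MTK_SIP_DEVICE_RESET,
			      0, 0, 0, 0, 0, 0, &res);
	}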
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index ad2abc96c0f1..dbdf8b01abed 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev,
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
-static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
- unsigned long flags;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit)
- hba->ahit = ahit;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (!pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
- ufshcd_auto_hibern8_enable(hba);
- ufshcd_release(hba);
- pm_runtime_put(hba->dev);
- }
-}
-
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
@@ -733,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(lun)) \
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index e5621e59a432..0f4e750a6748 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (C) 2018 Western Digital Corporation
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Western Digital Corporation
*/
#ifndef __UFS_SYSFS_H__
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3327981ef894..cfe380348bf0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -63,7 +63,6 @@
#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_UPIU_MAX_GENERAL_LUN 8
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -499,9 +498,9 @@ struct ufs_query_res {
#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
/*
@@ -530,28 +529,28 @@ struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
bool is_lu_power_on_wp;
-};
-
-#define MAX_MODEL_LEN 16
-/**
- * ufs_dev_desc - ufs device details from the device descriptor
- *
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_dev_desc {
+ /* Maximum number of general LU supported by the UFS device */
+ u8 max_lu_supported;
u16 wmanufacturerid;
+ /* UFS device product name */
u8 *model;
};
/**
* ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer of instance of struct ufs_dev_info
* @lun: LU number to check
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
-static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+ u8 lun)
{
- return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+ if (!dev_info || !dev_info->max_lu_supported) {
+ pr_err("Max General LU supported by UFS isn't initialized\n");
+ return false;
+ }
+
+ return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
#endif /* End of Header */
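
Note: with the bound now taken from the Geometry descriptor, callers validate
a LUN against the per-device limit instead of the removed
UFS_UPIU_MAX_GENERAL_LUN constant. A usage sketch mirroring the sysfs and
unit-descriptor call sites in this patch, assuming sdev and hba as in those
routines:

	/* Sketch: reject LUNs that have no unit descriptor. */
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);

	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;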
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index baeecee35d1e..53dd87628cbe 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -203,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
bsg_dev->parent = get_device(parent);
bsg_dev->release = ufs_bsg_node_release;
- dev_set_name(bsg_dev, "ufs-bsg");
+ dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
ret = device_add(bsg_dev);
if (ret)
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index fe6cad9b2a0d..d0ab147f98d3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -22,16 +22,17 @@
* @quirk: device quirk
*/
struct ufs_dev_fix {
- struct ufs_dev_desc card;
+ u16 wmanufacturerid;
+ u8 *model;
unsigned int quirk;
};
-#define END_FIX { { 0 }, 0 }
+#define END_FIX { }
/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) { \
- .card.wmanufacturerid = (_vendor),\
- .card.model = (_model), \
+ .wmanufacturerid = (_vendor),\
+ .model = (_model), \
.quirk = (_quirk), \
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b5966faf3e98..abd0e6b05f79 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -246,11 +246,10 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
@@ -266,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
return tag >= 0 && tag < hba->nutrs;
}
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
- int ret = 0;
-
if (!hba->is_irq_enabled) {
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
- hba);
- if (ret)
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
- __func__, ret);
+ enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
-
- return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
- free_irq(hba->irq, hba);
+ disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
@@ -335,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
u8 opcode = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (lrbp->cmd)
+ if (cmd)
ufshcd_add_cmd_upiu_trace(hba, tag, str);
return;
}
- if (lrbp->cmd) { /* data phase exists */
+ if (cmd) { /* data phase exists */
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str);
- opcode = (u8)(*lrbp->cmd->cmnd);
+ opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
* Currently we only fully trace read(10) and write(10)
* commands
*/
- if (lrbp->cmd->request && lrbp->cmd->request->bio)
- lba =
- lrbp->cmd->request->bio->bi_iter.bi_sector;
+ if (cmd->request && cmd->request->bio)
+ lba = cmd->request->bio->bi_iter.bi_sector;
transfer_len = be32_to_cpu(
lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
}
@@ -393,7 +384,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
- if (err_hist->reg[p] == 0)
+ if (err_hist->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
@@ -401,7 +392,7 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
}
if (!found)
- dev_err(hba->dev, "No record of %s errors\n", err_name);
+ dev_err(hba->dev, "No record of %s\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -436,8 +427,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_clk_freqs(hba);
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
+ ufshcd_vops_dbg_register_dump(hba);
}
static
@@ -497,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -646,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
- int tag;
- bool ret = false;
-
- if (!free_slot)
- goto out;
-
- do {
- tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
- if (tag >= hba->nutmrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
- *free_slot = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
- clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
-/**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* @hba: per adapter instance
* @pos: position of the bit to be cleared
@@ -1273,6 +1229,24 @@ out:
return ret;
}
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+ int *busy = priv;
+
+ WARN_ON_ONCE(reserved);
+ (*busy)++;
+ return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+ struct request_queue *q = hba->cmd_queue;
+ int busy = 0;
+
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+ return busy;
+}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
@@ -1490,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_setup_clocks(hba, true);
+ ufshcd_enable_irq(hba);
+
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
/* Prevent gating in this path */
@@ -1619,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1636,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
+ ufshcd_disable_irq(hba);
+
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -1673,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba))
return;
@@ -1881,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+ ufshcd_add_command_trace(hba, task_tag, "send");
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
- ufshcd_add_command_trace(hba, task_tag, "send");
}
/**
@@ -2239,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
+ struct scsi_cmnd *cmd = lrbp->cmd;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
@@ -2252,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
/* Total EHS length and Data segment length will be zero */
ucd_req_ptr->header.dword_2 = 0;
- ucd_req_ptr->sc.exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->sdb.length);
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+ memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -2443,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- /* acquire the tag to make sure device cmds don't use it */
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
- /*
- * Dev manage command in progress, requeue the command.
- * Requeuing the command helps in cases where the request *may*
- * find different tag instead of waiting for dev manage command
- * completion.
- */
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
- clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2479,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release(hba);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
@@ -2627,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
}
/**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
- int tag;
- bool ret = false;
- unsigned long tmp;
-
- if (!tag_out)
- goto out;
-
- do {
- tmp = ~hba->lrb_in_use;
- tag = find_last_bit(&tmp, hba->nutrs);
- if (tag >= hba->nutrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
- *tag_out = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
- clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
@@ -2676,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
@@ -2689,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -2714,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err ? "query_complete_err" : "query_complete");
out_put_tag:
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
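
Note: device-management commands now borrow a tag from the SCSI host's shared
blk-mq tag set through hba->cmd_queue, replacing the old lrb_in_use bitmap and
its wait queue. A skeleton of that lifecycle, with error handling elided and
the UPIU construction assumed:

	/* Sketch: tag lifecycle for a device management command. */
	struct request *req;
	int tag;

	req = blk_get_request(hba->cmd_queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	tag = req->tag;		/* indexes hba->lrb[] */

	/* ... compose the UPIU in hba->lrb[tag] and ring the doorbell ... */

	blk_put_request(req);	/* hands the tag back to the block layer */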
@@ -2918,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
int ret = 0;
u32 retries;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
ret = ufshcd_query_attr(hba, opcode, idn, index,
selector, attr_val);
if (ret)
@@ -3210,17 +3145,6 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
- u8 *buf,
- u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
-}
-
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
-{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
-}
/**
* struct uc_string_id - unicode string
@@ -3345,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -3929,7 +3853,7 @@ out:
return ret;
}
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -3955,6 +3879,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->ahit == ahit)
+ goto out_unlock;
+ hba->ahit = ahit;
+ if (!pm_runtime_suspended(hba->dev))
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
@@ -4095,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
@@ -4188,7 +4151,7 @@ out:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
int err = 0;
u32 reg;
@@ -4234,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
out:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
@@ -4311,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
return 0;
}
-static int ufshcd_hba_enable(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
{
int ret;
@@ -4336,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
+
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes, i, err = 0;
@@ -4372,13 +4338,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
-static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
- u32 reg)
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg)
{
reg_hist->reg[reg_hist->pos] = reg;
reg_hist->tstamp[reg_hist->pos] = ktime_get();
reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
}
+EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
/**
* ufshcd_link_startup - Initialize unipro link startup
@@ -4561,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
* protected so skip reading bLUWriteProtect parameter for
* it. For other W-LUs, UNIT DESCRIPTOR is not available.
*/
- else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+ else if (lun >= hba->dev_info.max_lu_supported)
ret = -ENOTSUPP;
else
ret = ufshcd_read_unit_desc_param(hba,
@@ -4608,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
sdev->use_10_for_ms = 1;
+ /* DBD field should be set to 1 in mode sense(10) */
+ sdev->set_dbd_for_ms = 1;
+
/* allow SCSI layer to restart the device in case of errors */
sdev->allow_restart = 1;
@@ -4799,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if (host_byte(result) != DID_OK)
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4856,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
cmd->result = result;
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
+ lrbp->compl_time_stamp = ktime_get();
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ lrbp->compl_time_stamp = ktime_get();
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
"dev_complete");
@@ -4870,17 +4841,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
-
- lrbp->compl_time_stamp = ktime_get();
}
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
ufshcd_clk_scaling_update_busy(hba);
-
- /* we might have free'd some tags above */
- wake_up(&hba->dev_cmd.tag_wq);
}
/**
@@ -5053,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
@@ -5077,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
+ hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5123,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
+ hba->urgent_bkops_lvl = curr_status;
out:
return err;
}
@@ -5200,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5214,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
out:
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5348,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -5603,6 +5572,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
return retval;
}
+struct ctm_info {
+ struct ufs_hba *hba;
+ unsigned long pending;
+ unsigned int ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+ struct ctm_info *const ci = priv;
+ struct completion *c;
+
+ WARN_ON_ONCE(reserved);
+ if (test_bit(req->tag, &ci->pending))
+ return true;
+ ci->ncpl++;
+ c = req->end_io_data;
+ if (c)
+ complete(c);
+ return true;
+}
+
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
@@ -5613,16 +5603,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
- u32 tm_doorbell;
+ struct request_queue *q = hba->tmf_queue;
+ struct ctm_info ci = {
+ .hba = hba,
+ .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+ };
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- if (hba->tm_condition) {
- wake_up(&hba->tm_wq);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -5728,7 +5716,10 @@ out:
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
struct utp_task_req_desc *treq, u8 tm_function)
{
+ struct request_queue *q = hba->tmf_queue;
struct Scsi_Host *host = hba->host;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request *req;
unsigned long flags;
int free_slot, task_tag, err;
@@ -5737,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
- wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+ req->end_io_data = &wait;
+ free_slot = req->tag;
+ WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
@@ -5763,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
/* wait until the task management command is completed */
- err = wait_event_timeout(hba->tm_wq,
- test_bit(free_slot, &hba->tm_condition),
+ err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
+ /*
+ * Make sure that ufshcd_compl_tm() does not trigger a
+ * use-after-free.
+ */
+ req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
@@ -5785,9 +5783,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
__clear_bit(free_slot, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- clear_bit(free_slot, &hba->tm_condition);
- ufshcd_put_tm_slot(hba, free_slot);
- wake_up(&hba->tm_tag_wq);
+ blk_put_request(req);
ufshcd_release(hba);
return err;
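
Note: task-management slots follow the same model on the dedicated tmf_queue:
a reserved request supplies the slot number, and the on-stack completion
parked in end_io_data is signalled by ufshcd_compl_tm() from the interrupt
path. A condensed sketch of the flow above, locking elided:

	/* Sketch: reserved-tag task management flow. */
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;

	req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT,
			      BLK_MQ_REQ_RESERVED);
	req->end_io_data = &wait;	/* completed by ufshcd_compl_tm() */

	/* ... write the TM UPIU and ring REG_UTP_TASK_REQ_DOOR_BELL ... */

	if (!wait_for_completion_io_timeout(&wait,
					    msecs_to_jiffies(TM_CMD_TIMEOUT)))
		req->end_io_data = NULL; /* guard against a late completion */

	blk_put_request(req);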
@@ -5863,6 +5859,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
@@ -5872,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out_unlock;
+ }
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -5948,8 +5952,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
+out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -6244,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
- clear_bit_unlock(tag, &hba->lrb_in_use);
- wake_up(&hba->dev_cmd.tag_wq);
-
out:
if (!err) {
err = SUCCESS;
@@ -6279,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete the requests
+ * cleared by h/w
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
@@ -6292,7 +6299,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
goto out;
/* Establish the link again and restore the device */
- err = ufshcd_probe_hba(hba);
+ err = ufshcd_probe_hba(hba, false);
if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
err = -EIO;
@@ -6315,7 +6322,6 @@ out:
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
- unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
do {
@@ -6325,15 +6331,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
- /*
- * After reset the door-bell might be cleared, complete
- * outstanding requests in s/w here.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
return err;
}
@@ -6488,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
if (!desc_buf)
return;
- ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
+ desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor.len = %d ret = %d",
@@ -6578,16 +6576,13 @@ out:
return ret;
}
-static int ufs_get_device_desc(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
size_t buff_len;
u8 model_index;
u8 *desc_buf;
-
- if (!dev_desc)
- return -EINVAL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
buff_len = max_t(size_t, hba->desc_size.dev_desc,
QUERY_DESC_MAX_SIZE + 1);
@@ -6597,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
goto out;
}
- err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
+ hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -6608,12 +6604,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
- dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
- &dev_desc->model, SD_ASCII_STD);
+ &dev_info->model, SD_ASCII_STD);
if (err < 0) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
@@ -6631,23 +6627,25 @@ out:
return err;
}
-static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+static void ufs_put_device_desc(struct ufs_hba *hba)
{
- kfree(dev_desc->model);
- dev_desc->model = NULL;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ kfree(dev_info->model);
+ dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
- struct ufs_dev_desc *dev_desc)
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
for (f = ufs_fixups; f->quirk; f++) {
- if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
- f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_desc->model &&
- STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
- !strcmp(f->card.model, UFS_ANY_MODEL)))
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
@@ -6863,6 +6861,37 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
+{
+ int err;
+ size_t buff_len;
+ u8 *desc_buf;
+
+ buff_len = hba->desc_size.geom_desc;
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ desc_buf, buff_len);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
+ hba->dev_info.max_lu_supported = 32;
+ else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
+ hba->dev_info.max_lu_supported = 8;
+
+out:
+ kfree(desc_buf);
+ return err;
+}
+
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6931,15 +6960,92 @@ out:
return err;
}
+static int ufshcd_device_params_init(struct ufs_hba *hba)
+{
+ bool flag;
+ int ret;
+
+ /* Clear any previous UFS device information */
+ memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
+ /* Init UFS geometry descriptor related parameters */
+ ret = ufshcd_device_geo_params_init(hba);
+ if (ret)
+ goto out;
+
+ /* Check and apply UFS device quirks */
+ ret = ufs_get_device_desc(hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ufs_fixup_device_setup(hba);
+
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ hba->dev_info.f_power_on_wp_en = flag;
+
+ /* Probe maximum power mode co-supported by both UFS host and device */
+ if (ufshcd_get_max_pwr_mode(hba))
+ dev_err(hba->dev,
+ "%s: Failed getting max supported power mode\n",
+ __func__);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_add_lus - probe and add UFS logical units
+ * @hba: per-adapter instance
+ */
+static int ufshcd_add_lus(struct ufs_hba *hba)
+{
+ int ret;
+
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ if (!hba->devfreq) {
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+ }
+
+ hba->clk_scaling.is_allowed = true;
+ }
+
+ ufs_bsg_probe(hba);
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+
+out:
+ return ret;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
+ * @async: asynchronous execution or not
*
* Execute link-startup and verify device initialization
*/
-static int ufshcd_probe_hba(struct ufs_hba *hba)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
{
- struct ufs_dev_desc card = {0};
int ret;
ktime_t start = ktime_get();
@@ -6957,27 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
+ /* Initiate UFS initialization process and wait until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
goto out;
- /* Init check for device descriptor sizes */
- ufshcd_init_desc_sizes(hba);
-
- ret = ufs_get_device_desc(hba, &card);
- if (ret) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, ret);
- goto out;
+ /*
+ * Initialize the UFS device parameters used by the driver; these
+ * parameters are associated with UFS descriptors.
+ */
+ if (async) {
+ ret = ufshcd_device_params_init(hba);
+ if (ret)
+ goto out;
}
- ufs_fixup_device_setup(hba, &card);
- ufs_put_device_desc(&card);
-
ufshcd_tune_unipro_params(hba);
/* UFS device is also active now */
@@ -6985,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
- if (ufshcd_get_max_pwr_mode(hba)) {
- dev_err(hba->dev,
- "%s: Failed getting max supported power mode\n",
- __func__);
- } else {
+ /* Gear up to HS gear if supported */
+ if (hba->max_pwr_info.is_valid) {
/*
* Set the right value to bRefClkFreq before attempting to
* switch to HS gears.
@@ -7010,59 +7112,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- /*
- * If we are in error handling context or in power management callbacks
- * context, no need to scan the host
- */
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- bool flag;
-
- /* clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
- hba->dev_info.f_power_on_wp_en = flag;
-
- if (!hba->is_init_prefetch)
- ufshcd_init_icc_levels(hba);
-
- /* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
- goto out;
-
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
- hba->clk_scaling.is_allowed = true;
- }
-
- ufs_bsg_probe(hba);
-
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
- }
-
- if (!hba->is_init_prefetch)
- hba->is_init_prefetch = true;
-
out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
- ufshcd_hba_exit(hba);
- }
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7078,43 +7128,25 @@ out:
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ int ret;
- ufshcd_probe_hba(hba);
-}
-
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
-{
- unsigned long flags;
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- int index;
- bool found = false;
-
- if (!scmd || !scmd->device || !scmd->device->host)
- return BLK_EH_DONE;
-
- host = scmd->device->host;
- hba = shost_priv(host);
- if (!hba)
- return BLK_EH_DONE;
-
- spin_lock_irqsave(host->host_lock, flags);
-
- for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[index].cmd == scmd) {
- found = true;
- break;
- }
- }
-
- spin_unlock_irqrestore(host->host_lock, flags);
+ /* Initialize hba, detect and initialize UFS device */
+ ret = ufshcd_probe_hba(hba, true);
+ if (ret)
+ goto out;
+ /* Probe and add UFS logical units */
+ ret = ufshcd_add_lus(hba);
+out:
/*
- * Bypass SCSI error handling and reset the block layer timer if this
- * SCSI command was not actually dispatched to UFS driver, otherwise
- * let SCSI layer handle the error as usual.
+ * If we failed to initialize the device or the device is not
+ * present, turn off the power/clocks etc.
*/
- return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
+ if (ret) {
+ pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_hba_exit(hba);
+ }
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -7135,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
- .eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -7574,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
+ ufs_put_device_desc(hba);
}
}
@@ -7701,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* turning off the link would also turn off the device.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || (check_for_bkops &&
- !hba->auto_bkops_enabled))) {
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
/*
* Let's make sure that link is in low power mode, we are doing
* this currently by putting the link in Hibern8. Otherway to
@@ -7908,6 +7939,11 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
+ /*
+ * Disable the host irq as there won't be any host controller
+ * transaction expected till resume.
+ */
+ ufshcd_disable_irq(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -7917,11 +7953,7 @@ disable_clks:
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
@@ -7974,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
/* enable the host irq as host controller would be active soon */
- ret = ufshcd_enable_irq(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
+ ufshcd_enable_irq(hba);
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
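ufshcd_enable_irq() now returns void, which is why the resume path drops its error check; suspend correspondingly calls ufshcd_disable_irq() earlier, as the hunk above shows. A sketch of the assumed helper pair, built on the is_irq_enabled flag the driver already keeps:

	/* Sketch (assumed shape; relies on the existing hba->is_irq_enabled) */
	static void ufshcd_enable_irq(struct ufs_hba *hba)
	{
		if (!hba->is_irq_enabled) {
			enable_irq(hba->irq);
			hba->is_irq_enabled = true;
		}
	}

	static void ufshcd_disable_irq(struct ufs_hba *hba)
	{
		if (hba->is_irq_enabled) {
			disable_irq(hba->irq);
			hba->is_irq_enabled = false;
		}
	}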
@@ -8250,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba)
{
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ blk_cleanup_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8328,6 +8361,18 @@ out_error:
}
EXPORT_SYMBOL(ufshcd_alloc_host);
+/*
+ * This stub exists only because blk_mq_alloc_tag_set() requires a
+ * queue_rq callback; TMF requests are never dispatched through it.
+ */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ WARN_ON_ONCE(true);
+ return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+ .queue_rq = ufshcd_queue_tmf,
+};
+
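With the tm_* wait queues gone, a free TMF slot is obtained by allocating a request from tmf_queue: the block layer's tag allocator does the waiting and mutual exclusion, and the request tag doubles as the UTMRL slot index. A hedged sketch of the assumed caller pattern:

	/* Sketch (assumed caller pattern, not the literal driver code) */
	static int example_reserve_tmf_slot(struct ufs_hba *hba)
	{
		struct request *req;
		int free_slot;

		/* Sleeps until one of the hba->nutmrs tags is free */
		req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
		if (IS_ERR(req))
			return PTR_ERR(req);
		free_slot = req->tag;	/* doubles as the UTMRL slot index */

		/* ... build the TM UPIU and ring the TM doorbell here ... */

		blk_put_request(req);	/* frees the slot for other TMFs */
		return 0;
	}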
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
@@ -8397,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
- /* Initailize wait queue for task management */
- init_waitqueue_head(&hba->tm_wq);
- init_waitqueue_head(&hba->tm_tag_wq);
-
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8413,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_rwsem(&hba->clk_scaling_lock);
- /* Initialize device management tag acquire wait queue */
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba);
@@ -8449,6 +8487,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+ if (IS_ERR(hba->cmd_queue)) {
+ err = PTR_ERR(hba->cmd_queue);
+ goto out_remove_scsi_host;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+ .nr_hw_queues = 1,
+ .queue_depth = hba->nutmrs,
+ .ops = &ufshcd_tmf_ops,
+ .flags = BLK_MQ_F_NO_SCHED,
+ };
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+ if (err < 0)
+ goto free_cmd_queue;
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ if (IS_ERR(hba->tmf_queue)) {
+ err = PTR_ERR(hba->tmf_queue);
+ goto free_tmf_tag_set;
+ }
+
/* Reset the attached device */
ufshcd_vops_device_reset(hba);
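cmd_queue shares hba->host->tag_set, so a tag taken from it can never collide with the tag of an in-flight SCSI command; device management commands (NOP OUT, query) are expected to reserve their tag the same way instead of sleeping on the removed dev_cmd.tag_wq. A sketch under that assumption:

	/* Sketch (assumption): tag allocation for a device management command */
	static int example_get_dev_cmd_tag(struct ufs_hba *hba)
	{
		struct request *req;
		int tag;

		req = blk_get_request(hba->cmd_queue, REQ_OP_DRV_OUT, 0);
		if (IS_ERR(req))
			return PTR_ERR(req);
		tag = req->tag;	/* unique across SCSI and dev-mgmt commands */

		/* ... compose and issue the NOP OUT / query UPIU here ... */

		blk_put_request(req);
		return 0;
	}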
@@ -8458,7 +8517,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
- goto out_remove_scsi_host;
+ goto free_tmf_queue;
}
/*
@@ -8495,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
+free_tmf_queue:
+ blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+ blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2740f6941ec6..2ae6c7c8528c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -212,13 +212,11 @@ struct ufs_query {
* @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time
* @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
*/
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
struct completion *complete;
- wait_queue_head_t tag_wq;
struct ufs_query query;
};
@@ -322,7 +320,7 @@ struct ufs_hba_variant_ops {
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *);
+ int (*apply_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -483,7 +481,7 @@ struct ufs_stats {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -495,17 +493,14 @@ struct ufs_stats {
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
* @pwr_done: completion for power mode change
- * @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @eh_flags: Error handling flags
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
- * @is_init_prefetch: flag to check if data was pre-fetched in initialization
* @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -513,6 +508,7 @@ struct ufs_stats {
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -541,6 +537,7 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
+ struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
@@ -561,7 +558,6 @@ struct ufs_hba {
u32 ahit;
struct ufshcd_lrb *lrb;
- unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -643,10 +639,8 @@ struct ufs_hba {
/* Device deviations from standard UFS device spec. */
unsigned int dev_quirks;
- wait_queue_head_t tm_wq;
- wait_queue_head_t tm_tag_wq;
- unsigned long tm_condition;
- unsigned long tm_slots_in_use;
+ struct blk_mq_tag_set tmf_tag_set;
+ struct request_queue *tmf_queue;
struct uic_command *active_uic_cmd;
struct mutex uic_cmd_mutex;
@@ -657,7 +651,6 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- bool is_init_prefetch;
struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
@@ -670,6 +663,7 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool silence_err_logs;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -803,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
+int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
+void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg);
static inline void check_upiu_size(void)
{
@@ -927,6 +926,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
#define SD_ASCII_STD true
#define SD_RAW false
@@ -1086,8 +1086,10 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset)
+ if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
+ ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
+ }
}
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
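ufshcd_update_reg_hist() is declared above so the device_reset wrapper can record resets in the error history. Its body is a small ring buffer; a sketch consistent with how struct ufs_err_reg_hist is used elsewhere in the driver (field and constant names assumed):

	/* Sketch (assumed field/constant names from struct ufs_err_reg_hist) */
	void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, u32 reg)
	{
		reg_hist->reg[reg_hist->pos] = reg;
		reg_hist->tstamp[reg_hist->pos] = ktime_get();
		reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
	}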
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index f539f873f94d..3dc4d8b76509 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -161,6 +161,17 @@
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+#define DL_FC1ProtectionTimeOutVal_Default 8191
+#define DL_TC1ReplayTimeOutVal_Default 65535
+#define DL_AFC1ReqTimeOutVal_Default 32767
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
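Controllers that lose these data-link timers across power mode changes or Hibern8 exit can reprogram the spec defaults through the local DME attributes above; a hedged example using the existing ufshcd_dme_set() helper:

	/* Sketch (assumed usage): restore TC0 data-link timeouts to defaults */
	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
		       DL_FC0ProtectionTimeOutVal_Default);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
		       DL_TC0ReplayTimeOutVal_Default);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
		       DL_AFC0ReqTimeOutVal_Default);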
/* PA power modes */
enum {
FAST_MODE = 1,