Diffstat (limited to 'drivers/scsi/ufs')
 drivers/scsi/ufs/ti-j721e-ufs.c |  13
 drivers/scsi/ufs/ufs-mediatek.c |  30
 drivers/scsi/ufs/ufs-qcom.c     |  10
 drivers/scsi/ufs/ufs-sysfs.c    |  61
 drivers/scsi/ufs/ufs.h          |  43
 drivers/scsi/ufs/ufs_quirks.h   |   7
 drivers/scsi/ufs/ufshcd.c       | 515
 drivers/scsi/ufs/ufshcd.h       |  45
 8 files changed, 613 insertions(+), 111 deletions(-)
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index 5216d228cdd9..46bb905b4d6a 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -32,14 +32,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- return ret;
+ goto disable_pm;
}
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "Cannot claim MPHY clock.\n");
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto clk_err;
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
@@ -54,16 +54,23 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
- pm_runtime_put_sync(dev);
+ goto clk_err;
}
return ret;
+
+clk_err:
+ pm_runtime_put_sync(dev);
+disable_pm:
+ pm_runtime_disable(dev);
+ return ret;
}
static int ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
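
The ti-j721e change above turns the probe error handling into a single unwind ladder. The subtlety worth calling out: pm_runtime_get_sync() increments the usage count even when it fails, so the failure path must drop that reference with pm_runtime_put_noidle() and also undo pm_runtime_enable(). A minimal sketch of the resulting pattern (function and label names are illustrative, not verbatim driver code):

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		pm_runtime_enable(dev);
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync() bumped the usage count even on failure */
			pm_runtime_put_noidle(dev);
			goto disable_pm;
		}

		/* ... resource setup; on failure: ret = ...; goto err; ... */

		return 0;
	err:
		pm_runtime_put_sync(dev);	/* balance the successful get_sync() */
	disable_pm:
		pm_runtime_disable(dev);	/* balance pm_runtime_enable() */
		return ret;
	}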
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 673c16596fb2..d56ce8d97d4e 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -30,6 +30,12 @@
#define ufs_mtk_device_reset_ctrl(high, res) \
ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
+ UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
+ END_FIX
+};
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -73,9 +79,9 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
if (status == PRE_CHANGE) {
if (host->unipro_lpm)
- hba->hba_enable_delay_us = 0;
+ hba->vps->hba_enable_delay_us = 0;
else
- hba->hba_enable_delay_us = 600;
+ hba->vps->hba_enable_delay_us = 600;
}
return 0;
@@ -263,6 +269,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -555,10 +565,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG) {
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+ if (mid == UFS_VENDOR_SAMSUNG)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
- }
/*
* Decide waiting time before gating reference clock and
@@ -575,6 +583,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
+
+ ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (mid == UFS_VENDOR_SAMSUNG)
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+}
+
/**
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -589,6 +608,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
+ .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
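
The new fixup_dev_quirks vop exists because quirk bits must be populated before anything consumes them; apply_dev_quirks runs too late for that. Moving the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE clearing into ufs_mtk_fixup_dev_quirks() keeps it alongside the vendor quirk table. The assumed probe-time ordering after this series (a call-flow sketch reconstructed from the ufshcd.c hunks below, not verbatim code):

	ufs_get_device_desc(hba)
	    ufs_fixup_device_setup(hba)
	        ufshcd_fixup_dev_quirks(hba, ufs_fixups);  /* core quirk table */
	        ufshcd_vops_fixup_dev_quirks(hba);  /* -> ufs_mtk_fixup_dev_quirks() */
	    /* dev_quirks now complete; WB probing may test them */
	    ufshcd_wb_probe(hba, desc_buf);
	/* ... later in probe ... */
	ufshcd_vops_apply_dev_quirks(hba);  /* -> ufs_mtk_apply_dev_quirks() */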
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 19aa5c44e0da..2e6ddb5cdfc2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -572,7 +572,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
if (ufs_qcom_is_link_off(hba)) {
/*
@@ -587,7 +586,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
}
- return ret;
+ return 0;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
@@ -1071,6 +1070,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ hba->caps |= UFSHCD_CAP_WB_EN;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -1658,11 +1658,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
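
Swapping usleep_range() for udelay() in the qcom dump path is the standard atomic-context fix: the dbg_register_dump vop can run from the error handler with the host spinlock held and interrupts disabled (an assumption about the calling context; the hunk itself does not show it), and sleeping there is illegal:

	spin_lock_irqsave(hba->host->host_lock, flags);
	...
	udelay(1000);	/* busy-wait: safe in atomic context */
	/*
	 * usleep_range(1000, 1100) would call schedule() and trigger
	 * "BUG: scheduling while atomic" under the spinlock.
	 */
	...
	spin_unlock_irqrestore(hba->host->host_lock, flags);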
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 92a63eebdca9..2d71d232a69d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -276,6 +276,10 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
+UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
+UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
+UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_device_type.attr,
@@ -304,6 +308,10 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_ext_feature_sup.attr,
+ &dev_attr_wb_presv_us_en.attr,
+ &dev_attr_wb_type.attr,
+ &dev_attr_wb_shared_alloc_units.attr,
NULL,
};
@@ -373,6 +381,12 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
+UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
+
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_raw_device_capacity.attr,
@@ -404,6 +418,11 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_wb_max_alloc_units.attr,
+ &dev_attr_wb_max_wb_luns.attr,
+ &dev_attr_wb_buff_cap_adj.attr,
+ &dev_attr_wb_sup_red_type.attr,
+ &dev_attr_wb_sup_wb_type.attr,
NULL,
};
@@ -603,20 +622,29 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
.attrs = ufs_sysfs_string_descriptors,
};
+static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
+{
+ return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
+ (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+}
+
#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag); \
+ QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
static DEVICE_ATTR_RO(_name)
@@ -628,6 +656,9 @@ UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
+UFS_FLAG(wb_enable, _WB_EN);
+UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
+UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -638,6 +669,9 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_phy_resource_removal.attr,
&dev_attr_busy_rtc.attr,
&dev_attr_disable_fw_update.attr,
+ &dev_attr_wb_enable.attr,
+ &dev_attr_wb_flush_en.attr,
+ &dev_attr_wb_flush_during_h8.attr,
NULL,
};
@@ -646,6 +680,12 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
+{
+ return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
+ (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+}
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -653,9 +693,12 @@ static ssize_t _name##_show(struct device *dev, \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
int ret; \
+ u8 index = 0; \
+ if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
@@ -679,6 +722,11 @@ UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
+UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
+UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
+UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
+UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
+
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
@@ -697,6 +745,10 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_ffu_status.attr,
&dev_attr_psa_state.attr,
&dev_attr_psa_data_size.attr,
+ &dev_attr_wb_flush_status.attr,
+ &dev_attr_wb_avail_buf.attr,
+ &dev_attr_wb_life_time_est.attr,
+ &dev_attr_wb_cur_buf.attr,
NULL,
};
@@ -748,6 +800,8 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
+
static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_boot_lun_id.attr,
@@ -763,6 +817,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_wb_buf_alloc_units.attr,
NULL,
};
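
For reference, here is roughly what UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE) expands to after this change. This is a hand expansion; the final sprintf() comes from the unchanged tail of the macro, which this hunk does not show, so treat its format string as an assumption:

	static ssize_t wb_avail_buf_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);
		u32 value;
		int ret;
		u8 index = 0;

		/* WB attributes may be per-LUN in dedicated-buffer mode */
		if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE))
			index = ufshcd_wb_get_query_index(hba);
		pm_runtime_get_sync(hba->dev);
		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
					QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
					index, 0, &value);
		pm_runtime_put_sync(hba->dev);
		if (ret)
			return -EINVAL;
		return sprintf(buf, "0x%08X\n", value);	/* assumed macro tail */
	}
	static DEVICE_ATTR_RO(wb_avail_buf);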
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 990cb48e2403..c70845d41449 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -64,6 +64,9 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
+/* WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
@@ -140,6 +143,9 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
};
/* Attribute idn for Query requests */
@@ -168,6 +174,10 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
};
/* Descriptor idn for Query requests */
@@ -191,9 +201,9 @@ enum desc_header_offset {
};
enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x59,
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x2D,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
@@ -219,6 +229,7 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
/* Device descriptor parameters offsets in bytes*/
@@ -258,6 +269,10 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -302,6 +317,11 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
};
/* Health descriptor parameters offsets in bytes*/
@@ -313,6 +333,12 @@ enum health_desc_param {
HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -333,6 +359,11 @@ enum {
UFSHCD_AMP = 3,
};
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+};
+
#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -447,6 +478,8 @@ enum ufs_dev_pwr_mode {
UFS_POWERDOWN_PWR_MODE = 3,
};
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
@@ -532,11 +565,17 @@ struct ufs_dev_info {
bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
u8 max_lu_supported;
+ u8 wb_dedicated_lu;
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ u32 d_ext_ufs_feature_sup;
+ u8 b_wb_buffer_type;
+ u32 d_wb_alloc_units;
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
};
/**
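
UFS_WB_BUF_REMAIN_PERCENT() leans on the convention that the WriteBooster buffer-size attributes (dAvailableWriteBoosterBufferSize and friends) are reported in 10%-granularity units of the total buffer, 0 through 10 (an assumption from the UFS spec, not visible in this hunk), so the macro is a plain divide-by-ten:

	UFS_WB_BUF_REMAIN_PERCENT(80)	/* -> 8: 80% of the buffer still free */
	UFS_WB_BUF_REMAIN_PERCENT(40)	/* -> 4: default flush threshold (ufshcd.c) */
	UFS_WB_BUF_REMAIN_PERCENT(10)	/* -> 1: the "more than 90% full" check */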
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index df7a1e6805a3..e3175a63c676 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -101,4 +101,11 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make the UFS core driver probe and
+ * enable the supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 698e8d20b4ba..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,8 @@
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -92,6 +94,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
@@ -251,6 +256,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
@@ -272,6 +283,25 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ ret = ufshcd_wb_ctrl(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+ else
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+ __func__, ret);
+ ufshcd_wb_toggle_flush(hba, true);
+}
+
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
@@ -535,21 +565,21 @@ void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
-/*
+/**
* ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- * @can_sleep - perform sleep or just spin
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to the read register value
+ * @val: value to wait for
+ * @interval_us: polling interval in microseconds
+ * @timeout_ms: timeout in milliseconds
*
- * Returns -ETIMEDOUT on error, zero on success
+ * Return:
+ * -ETIMEDOUT on error, zero on success.
*/
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep)
+ unsigned long timeout_ms)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -558,10 +588,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- if (can_sleep)
- usleep_range(interval_us, interval_us + 50);
- else
- udelay(interval_us);
+ usleep_range(interval_us, interval_us + 50);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
@@ -1150,10 +1177,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret)
+ if (ret) {
ufshcd_scale_clks(hba, false);
+ goto out_unprepare;
+ }
}
+ /* Enable Write Booster if we have scaled up, else disable it */
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_wb_ctrl(hba, scale_up);
+ down_write(&hba->clk_scaling_lock);
+
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
@@ -1319,23 +1353,6 @@ start_window:
return 0;
}
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
- .upthreshold = 70,
- .downdifferential = 5,
-};
-
-static void *gov_data = &ufs_ondemand_data;
-#else
-static void *gov_data; /* NULL */
-#endif
-
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1351,12 +1368,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
- ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
- gov_data);
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
+ &hba->vps->ondemand_data);
devfreq = devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
+ &hba->vps->devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- gov_data);
+ &hba->vps->ondemand_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -2560,7 +2577,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000, true);
+ mask, ~mask, 1000, 1000);
return err;
}
@@ -2747,13 +2764,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
@@ -2774,16 +2791,17 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
+ * @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res)
+ enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
- int err, index = 0, selector = 0;
+ int err, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -3223,7 +3241,7 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
struct uc_string_id {
u8 len;
u8 type;
- wchar_t uc[0];
+ wchar_t uc[];
} __packed;
/* replace non-printable or non-ASCII characters with spaces */
@@ -4137,10 +4155,10 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
int i;
int err;
- bool flag_res = 1;
+ bool flag_res = true;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -4151,7 +4169,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
/* poll for max. 1000 iterations for fDeviceInit flag to clear */
for (i = 0; i < 1000 && !err && flag_res; i++)
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (err)
dev_err(hba->dev,
@@ -4229,16 +4247,23 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
+ unsigned long flags;
int err;
+ /*
+ * Obtain the host lock to prevent the controller from being disabled
+ * while the UFS interrupt handler is active on another CPU.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1, can_sleep);
+ 10, 1);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4259,7 +4284,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -4279,7 +4304,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- ufshcd_delay_us(hba->hba_enable_delay_us, 100);
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
retry = 50;
@@ -4966,7 +4991,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
goto out;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
__func__, err);
@@ -5016,7 +5041,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
__func__, err);
@@ -5161,6 +5186,190 @@ out:
__func__, err);
}
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+ int ret;
+ u8 index;
+ enum query_opcode opcode;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return 0;
+
+ if (!(enable ^ hba->wb_enabled))
+ return 0;
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
+ if (ret) {
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+ return ret;
+ }
+
+ hba->wb_enabled = enable;
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+ int val;
+ u8 index;
+
+ if (set)
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ return ufshcd_query_flag_retry(hba, val,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+ index, NULL);
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+ if (enable)
+ ufshcd_wb_buf_flush_enable(hba);
+ else
+ ufshcd_wb_buf_flush_disable(hba);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret)
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+ __func__, ret);
+ else
+ hba->wb_buf_flush_enabled = true;
+
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+ return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret) {
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+ __func__, ret);
+ } else {
+ hba->wb_buf_flush_enabled = false;
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+ u32 avail_buf)
+{
+ u32 cur_buf;
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+ index, 0, &cur_buf);
+ if (ret) {
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!cur_buf) {
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+ cur_buf);
+ return false;
+ }
+ /* Keep flushing while the available (free) buffer stays below the threshold */
+ if (avail_buf < hba->vps->wb_flush_threshold)
+ return true;
+
+ return false;
+}
+
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
+{
+ int ret;
+ u32 avail_buf;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return false;
+ /*
+ * The ufs device needs the vcc to be ON to flush.
+ * With user-space reduction enabled, it's enough to decide on
+ * flushing by checking only the available buffer; the threshold
+ * used here corresponds to the buffer being more than 90% full.
+ * With user-space preservation enabled, the current buffer should
+ * be checked too, because the wb buffer size can shrink as the disk
+ * fills up. This info is provided by the current buffer attribute
+ * (dCurrentWriteBoosterBufferSize). There's no point in keeping
+ * vcc on when the current buffer is empty.
+ */
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+ index, 0, &avail_buf);
+ if (ret) {
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!hba->dev_info.b_presrv_uspc_en) {
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
+ return true;
+ return false;
+ }
+
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+}
+
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
+ struct ufs_hba,
+ rpm_dev_flush_recheck_work);
+ /*
+ * To prevent unnecessary VCC power drain after the device finishes a
+ * WriteBooster buffer flush or Auto BKOPs, force a runtime resume
+ * after a certain delay so that the thresholds are rechecked on the
+ * next runtime suspend.
+ */
+ pm_runtime_get_sync(hba->dev);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
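
Putting the two helpers above together, the runtime flush decision reduces to the following (a worked restatement under the same 10%-granularity assumption noted for UFS_WB_BUF_REMAIN_PERCENT):

	/*
	 * b_presrv_uspc_en == 0 (user-space reduction mode):
	 *	need_flush = (avail_buf <= 1);	// <= 10% free, i.e. > 90% full
	 *
	 * b_presrv_uspc_en == 1 (user space preserved):
	 *	need_flush = (cur_buf != 0) &&	// something left to flush
	 *		     (avail_buf < hba->vps->wb_flush_threshold); // default 4 == 40%
	 */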
@@ -5723,7 +5932,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000, true);
+ mask, 0, 1000, 1000);
out:
return err;
}
@@ -6299,8 +6508,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshcd_hba_stop(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba, false);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
@@ -6603,6 +6813,93 @@ out:
return ret;
}
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u8 lun;
+ u32 d_lu_wb_buf_alloc;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
+ goto wb_disabled;
+
+ hba->dev_info.d_ext_ufs_feature_sup =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ goto wb_disabled;
+
+ /*
+ * WB may be supported but not configured while provisioning.
+ * The spec says that in dedicated wb buffer mode,
+ * at most one lun will have a wb buffer configured;
+ * both shared and dedicated buffer modes are handled below.
+ */
+ hba->dev_info.b_wb_buffer_type =
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+
+ hba->dev_info.b_presrv_uspc_en =
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
+
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
+ hba->dev_info.d_wb_alloc_units =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
+ if (!hba->dev_info.d_wb_alloc_units)
+ goto wb_disabled;
+ } else {
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
+ d_lu_wb_buf_alloc = 0;
+ ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
+ (u8 *)&d_lu_wb_buf_alloc,
+ sizeof(d_lu_wb_buf_alloc));
+ if (d_lu_wb_buf_alloc) {
+ hba->dev_info.wb_dedicated_lu = lun;
+ break;
+ }
+ }
+
+ if (!d_lu_wb_buf_alloc)
+ goto wb_disabled;
+ }
+ return;
+
+wb_disabled:
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
+}
+
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+{
+ struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (!fixups)
+ return;
+
+ for (f = fixups; f->quirk; f++) {
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
+{
+ /* apply fixups from the general quirk table */
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
+
+ /* allow vendors to fix quirks */
+ ufshcd_vops_fixup_dev_quirks(hba);
+}
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
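
The length check at the top of ufshcd_wb_probe() guards the 32-bit read that follows: dExtendedUFSFeaturesSupport sits at offset 0x4F and get_unaligned_be32() consumes four bytes, so the device descriptor must be at least 0x4F + 4 = 0x53 bytes long. The new QUERY_DESC_DEVICE_DEF_SIZE of 0x59 in ufs.h satisfies that, while a device reporting the old 0x40-byte descriptor fails the check and WB is disabled cleanly:

	/* 0x40 (old descriptor) < 0x53 -> wb_disabled; 0x59 (new) passes */
	if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
		goto wb_disabled;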
@@ -6639,6 +6936,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -6647,6 +6945,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ ufs_fixup_device_setup(hba);
+
+ /*
+ * Probe WB only for UFS-3.1 and UFS-2.2 devices, or for UFS devices
+ * with the quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
+ */
+ if (dev_info->wspecversion >= 0x310 ||
+ dev_info->wspecversion == 0x220 ||
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
+ ufshcd_wb_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -6666,21 +6975,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- for (f = ufs_fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
@@ -6996,9 +7290,6 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
bool flag;
int ret;
- /* Clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
/* Init check for device descriptor sizes */
ufshcd_init_desc_sizes(hba);
@@ -7017,10 +7308,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
ufshcd_get_ref_clk_gating_wait(hba);
- ufs_fixup_device_setup(hba);
-
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
hba->dev_info.f_power_on_wp_en = flag;
/* Probe maximum power mode co-supported by both UFS host and device */
@@ -7149,6 +7438,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -7195,6 +7485,16 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
NULL,
};
+static struct ufs_hba_variant_params ufs_hba_vps = {
+ .hba_enable_delay_us = 1000,
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
+ .devfreq_profile.polling_ms = 100,
+ .devfreq_profile.target = ufshcd_devfreq_target,
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
+ .ondemand_data.upthreshold = 70,
+ .ondemand_data.downdifferential = 5,
+};
+
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
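
Because the devfreq profile and ondemand data now live in struct ufs_hba_variant_params (replacing the file-scope statics deleted earlier), each hba carries its own tunable copy. A hypothetical vops->init() override showing the point of the change; the wb_flush_threshold line mirrors what ufs-mediatek.c really does above, while the devfreq values are made up for illustration:

	static int example_vendor_init(struct ufs_hba *hba)
	{
		/* per-hba now, so this cannot leak into other controllers */
		hba->vps->devfreq_profile.polling_ms = 60;
		hba->vps->ondemand_data.upthreshold = 80;
		hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
		return 0;
	}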
@@ -7774,7 +8074,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -7809,6 +8109,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
*
* Ignore the error returned by ufshcd_toggle_vreg() as the device is
* anyway in a low power state, which saves some power.
+ *
+ * If Write Booster is enabled and the device needs to flush the WB
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -7938,16 +8241,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
}
- }
+ /*
+ * If the device needs to do BKOPs or a WB buffer flush during
+ * Hibern8, keep the device power mode as "active power mode"
+ * and keep the VCC supply on.
+ */
+ hba->dev_info.b_rpm_dev_flush_capable =
+ hba->auto_bkops_enabled ||
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
+ ufshcd_wb_need_flush(hba));
+ }
+
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op)) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
}
flush_work(&hba->eeh_work);
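
The restructured suspend path is easier to audit as a single predicate: the device is kept in active power mode, with ufshcd_set_dev_pwr_mode() skipped, whenever background work may still be pending. Restating the condition set above (sketch, equivalent to the diff):

	hba->dev_info.b_rpm_dev_flush_capable =
		hba->auto_bkops_enabled ||
		((req_link_state == UIC_LINK_HIBERN8_STATE ||
		  (req_link_state == UIC_LINK_ACTIVE_STATE &&
		   ufshcd_is_auto_hibern8_enabled(hba))) &&
		 ufshcd_wb_need_flush(hba));
	/*
	 * If true: keep VCC on, skip the SSU to sleep/power-down, and let
	 * rpm_dev_flush_recheck_work (scheduled below, 5 s) force a runtime
	 * resume so the thresholds are re-evaluated on the next suspend.
	 */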
@@ -8000,9 +8318,16 @@ enable_gating:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+ hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
out:
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+ }
+
hba->pm_op_in_progress = 0;
+
if (ret)
ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
@@ -8055,9 +8380,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
else
goto vendor_suspend;
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is
+ * required since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
@@ -8087,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ hba->dev_info.b_rpm_dev_flush_capable = false;
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+ }
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8313,7 +8647,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
@@ -8422,7 +8756,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- hba->hba_enable_delay_us = 1000;
+ hba->vps = &ufs_hba_vps;
err = ufshcd_hba_init(hba);
if (err)
@@ -8560,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
/* Set the default auto-hiberate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6ffc08ad85f6..bf97d616e597 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -69,6 +69,7 @@
#include <scsi/scsi_eh.h>
#include "ufs.h"
+#include "ufs_quirks.h"
#include "ufshci.h"
#define UFSHCD "ufshcd"
@@ -336,6 +337,7 @@ struct ufs_hba_variant_ops {
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -555,6 +557,20 @@ enum ufshcd_caps {
* for userspace to control the power management.
*/
UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+ * This capability allows the host controller driver to turn on
+ * WriteBooster, if the underlying device supports it and is
+ * provisioned to be used. This increases write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
};
/**
@@ -654,6 +670,7 @@ struct ufs_hba {
int nutmrs;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
void *priv;
unsigned int irq;
bool is_irq_enabled;
@@ -675,7 +692,6 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
- u16 hba_enable_delay_us;
bool is_powered;
/* Work Queues */
@@ -727,6 +743,9 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
+ bool wb_buf_flush_enabled;
+ bool wb_enabled;
+ struct delayed_work rpm_dev_flush_recheck_work;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -775,6 +794,11 @@ static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -808,7 +832,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep);
+ unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
u32 reg);
@@ -845,6 +869,13 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
+static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
+{
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ return hba->dev_info.wb_dedicated_lu;
+ return 0;
+}
+
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
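
ufshcd_wb_get_query_index() centralizes index selection for every WB query: in WB_BUF_MODE_LU_DEDICATED the flags and attributes belong to the LUN that owns the buffer (wb_dedicated_lu, discovered in ufshcd_wb_probe()), while shared mode queries the device at index 0. Usage sketch, mirroring ufshcd_wb_ctrl() in ufshcd.c:

	u8 index = ufshcd_wb_get_query_index(hba);

	ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_WB_EN, index, NULL);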
@@ -932,11 +963,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res);
+ enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -1071,6 +1102,12 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->fixup_dev_quirks)
+ hba->vops->fixup_dev_quirks(hba);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)