Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c            85
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h          11
-rw-r--r--  drivers/ufs/core/ufshcd.c              104
-rw-r--r--  drivers/ufs/core/ufshpb.c                8
-rw-r--r--  drivers/ufs/host/ufs-mediatek-trace.h   27
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c        205
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h          7
-rw-r--r--  drivers/ufs/host/ufs-qcom.c              2
8 files changed, 386 insertions, 63 deletions
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0a088b47d557..53aea56d1de1 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -225,12 +225,13 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
unsigned int wb_enable;
ssize_t res;
- if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+ if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
+ && ufshcd_enable_wb_if_scaling_up(hba))) {
/*
* If the platform supports UFSHCD_CAP_CLK_SCALING, WB is toggled
* on/off while clocks are scaled up/down.
*/
- dev_warn(dev, "To control WB through wb_on is not allowed!\n");
+ dev_warn(dev, "It is not allowed to configure WB!\n");
return -EOPNOTSUPP;
}
@@ -254,6 +255,49 @@ out:
return res < 0 ? res : count;
}
+static ssize_t enable_wb_buf_flush_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
+}
+
+static ssize_t enable_wb_buf_flush_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int enable_wb_buf_flush;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
+ dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &enable_wb_buf_flush))
+ return -EINVAL;
+
+ if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -262,6 +306,7 @@ static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
+static DEVICE_ATTR_RW(enable_wb_buf_flush);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -272,6 +317,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
&dev_attr_wb_on.attr,
+ &dev_attr_enable_wb_buf_flush.attr,
NULL
};
@@ -279,6 +325,40 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
+}
+
+static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
+}
+
+static DEVICE_ATTR_RO(clock_scaling);
+static DEVICE_ATTR_RO(write_booster);
+
+/*
+ * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
+ * group.
+ */
+static struct attribute *ufs_sysfs_capabilities_attrs[] = {
+ &dev_attr_clock_scaling.attr,
+ &dev_attr_write_booster.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_capabilities_group = {
+ .name = "capabilities",
+ .attrs = ufs_sysfs_capabilities_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1134,6 +1214,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
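The sysfs additions above all follow the stock device-attribute pattern: a show() callback that emits a single value with sysfs_emit(), plus a named attribute group so the read-only capability flags land in a capabilities/ subdirectory of the host's sysfs node. A minimal, self-contained sketch of that pattern (the example_feature attribute is hypothetical, not part of this patch):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical attribute illustrating the pattern used by
 * clock_scaling and write_booster above.
 */
static ssize_t example_feature_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds-checks against PAGE_SIZE, unlike sprintf(). */
	return sysfs_emit(buf, "%d\n", 1);
}
static DEVICE_ATTR_RO(example_feature);

static struct attribute *example_attrs[] = {
	&dev_attr_example_feature.attr,
	NULL
};

/* A non-NULL .name makes the group a subdirectory, which is how the
 * "capabilities" directory above comes into existence.
 */
static const struct attribute_group example_group = {
	.name	= "capabilities",
	.attrs	= example_attrs,
};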
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 8f67db202d7b..f68ca33f6ac7 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -26,6 +26,12 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
+static inline bool ufshcd_is_wb_buf_flush_allowed(struct ufs_hba *hba)
+{
+ return ufshcd_is_wb_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL);
+}
+
#ifdef CONFIG_SCSI_UFS_HWMON
void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
void ufs_hwmon_remove(struct ufs_hba *hba);
@@ -36,6 +42,11 @@ static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
#endif
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
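The new ufshcd_is_wb_buf_flush_allowed() helper bundles the two conditions every manual-flush caller must check: WriteBooster is available, and the controller does not carry the UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL quirk. A hedged sketch of a typical caller (the function name is illustrative, not from this patch):

/* Illustrative caller: only touch the manual flush flag when the
 * device actually honors it; quirky controllers manage flushing
 * autonomously.
 */
static void example_set_flush(struct ufs_hba *hba, bool enable)
{
	if (!ufshcd_is_wb_buf_flush_allowed(hba))
		return;

	ufshcd_wb_toggle_buf_flush(hba, enable);
}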
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a202d7d5240d..7256e6c43ca6 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
@@ -265,8 +266,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -286,16 +287,17 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
-static inline void ufshcd_wb_config(struct ufs_hba *hba)
+static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
return;
ufshcd_wb_toggle(hba, true);
- ufshcd_wb_toggle_flush_during_h8(hba, true);
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, true);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
+
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -457,7 +459,7 @@ static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- e->val[p], ktime_to_us(e->tstamp[p]));
+ e->val[p], div_u64(e->tstamp[p], 1000));
found = true;
}
@@ -502,9 +504,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, ktime_to_us(lrbp->issue_time_stamp));
+ tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, ktime_to_us(lrbp->compl_time_stamp));
+ tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -566,10 +568,10 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev,
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- ktime_to_us(hba->ufs_stats.last_intr_ts),
+ div_u64(hba->ufs_stats.last_intr_ts, 1000),
hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
@@ -1298,9 +1300,11 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
+ if (ufshcd_enable_wb_if_scaling_up(hba)) {
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
+ ufshcd_wb_toggle(hba, scale_up);
+ }
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, is_writelock);
@@ -2140,7 +2144,9 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -2701,9 +2707,9 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
* Associate the UFS controller queue with the default and poll HCTX types.
* Initialize the mq_map[] arrays.
*/
-static int ufshcd_map_queues(struct Scsi_Host *shost)
+static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i, ret;
+ int i;
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -2720,11 +2726,8 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
WARN_ON_ONCE(true);
}
map->queue_offset = 0;
- ret = blk_mq_map_queues(map);
- WARN_ON_ONCE(ret);
+ blk_mq_map_queues(map);
}
-
- return 0;
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
@@ -4222,7 +4225,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
hba->ufs_stats.hibern8_exit_cnt++;
}
@@ -4724,7 +4727,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
- e->tstamp[e->pos] = ktime_get();
+ e->tstamp[e->pos] = local_clock();
e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
@@ -5357,6 +5360,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -5752,60 +5756,60 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
int ret;
- if (!ufshcd_is_wb_allowed(hba))
- return 0;
-
- if (!(enable ^ hba->dev_info.wb_enabled))
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_enabled == enable)
return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
if (ret) {
- dev_err(hba->dev, "%s Write Booster %s failed %d\n",
- __func__, enable ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return ret;
}
hba->dev_info.wb_enabled = enable;
- dev_dbg(hba->dev, "%s Write Booster %s\n",
+ dev_dbg(hba->dev, "%s: Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
}
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable)
{
int ret;
- ret = __ufshcd_wb_toggle(hba, set,
+ ret = __ufshcd_wb_toggle(hba, enable,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
if (ret) {
- dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
- __func__, set ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return;
}
- dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
- __func__, set ? "enabled" : "disabled");
+ dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
+ __func__, enable ? "enabled" : "disabled");
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
int ret;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_buf_flush_enabled == enable)
- return;
+ return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
if (ret) {
- dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
- enable ? "enable" : "disable", ret);
- return;
+ dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
+ return ret;
}
hba->dev_info.wb_buf_flush_enabled = enable;
-
- dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
+ dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
__func__, enable ? "enabled" : "disabled");
+
+ return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5820,7 +5824,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
index, 0, &cur_buf);
if (ret) {
- dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -5836,10 +5840,10 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, false);
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, false);
- ufshcd_wb_toggle_flush_during_h8(hba, false);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
ufshcd_wb_toggle(hba, false);
hba->caps &= ~UFSHCD_CAP_WB_EN;
@@ -5905,7 +5909,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
index, 0, &avail_buf);
if (ret) {
- dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -6645,7 +6649,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = ktime_get();
+ hba->ufs_stats.last_intr_ts = local_clock();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -8236,7 +8240,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
*/
ufshcd_set_active_icc_lvl(hba);
- ufshcd_wb_config(hba);
+ /* Enable UFS Write Booster if supported */
+ ufshcd_configure_wb(hba);
+
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
/* Enable Auto-Hibernate if configured */
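A recurring change in this file swaps ktime_get() for local_clock() when stamping commands, events, and interrupts. local_clock() returns a raw per-CPU nanosecond count as a plain u64, so the print sites convert to microseconds with div_u64() rather than ktime_to_us(). A minimal sketch of that conversion, assuming only the two headers shown:

#include <linux/math64.h>
#include <linux/sched/clock.h>

/* local_clock() yields nanoseconds as a u64; div_u64() divides safely
 * on 32-bit architectures, where a plain 64-bit "/ 1000" would pull in
 * __udivdi3, which the kernel does not link against.
 */
static u64 example_timestamp_us(void)
{
	return div_u64(local_clock(), 1000);
}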
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a1a7a1175a5a..3d69a81c5b17 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
srgn->srgn_state = HPB_SRGN_VALID;
}
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
ufshpb_put_req(umap_req->hpb, umap_req);
+ return RQ_END_IO_NONE;
}
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ufshpb_put_map_req(map_req->hpb, map_req);
+ return RQ_END_IO_NONE;
}
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
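This hunk tracks a block-layer interface change: request end_io callbacks now return enum rq_end_io_ret so the callback can tell the core whether it has already disposed of the request. A hedged minimal handler under the new signature (the callback and its payload are illustrative, not from this patch):

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Illustrative callback: RQ_END_IO_NONE tells the block layer to take
 * no further action; RQ_END_IO_FREE would ask it to free the request
 * on the callback's behalf.
 */
static enum rq_end_io_ret example_end_io(struct request *rq,
					 blk_status_t error)
{
	kfree(rq->end_io_data);	/* driver-private payload */
	return RQ_END_IO_NONE;
}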
diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
index 7e010848dc99..b5f2ec314074 100644
--- a/drivers/ufs/host/ufs-mediatek-trace.h
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -24,9 +24,32 @@ TRACE_EVENT(ufs_mtk_event,
__entry->data = data;
),
- TP_printk("ufs:event=%u data=%u",
+ TP_printk("ufs: event=%u data=%u",
__entry->type, __entry->data)
- );
+);
+
+TRACE_EVENT(ufs_mtk_clk_scale,
+ TP_PROTO(const char *name, bool scale_up, unsigned long clk_rate),
+ TP_ARGS(name, scale_up, clk_rate),
+
+ TP_STRUCT__entry(
+ __field(const char*, name)
+ __field(bool, scale_up)
+ __field(unsigned long, clk_rate)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->scale_up = scale_up;
+ __entry->clk_rate = clk_rate;
+ ),
+
+ TP_printk("ufs: clk (%s) scaled %s @ %lu",
+ __entry->name,
+ __entry->scale_up ? "up" : "down",
+ __entry->clk_rate)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
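Like any TRACE_EVENT, the new tracepoint compiles to a no-op unless enabled through tracefs; the driver simply calls the generated trace_ufs_mtk_clk_scale() stub. A hedged sketch of a call site mirroring the one added in ufs-mediatek.c (the wrapper name is illustrative):

/* Illustrative wrapper, not from this patch: record the scaling
 * direction together with the rate the mux actually runs at.
 */
static void example_trace_scale(struct ufs_clk_info *clki, bool scale_up)
{
	trace_ufs_mtk_clk_scale(clki->name, scale_up,
				clk_get_rate(clki->clk));
}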
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index c958279bdd8f..7309f3f87eac 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -19,7 +19,6 @@
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <ufs/ufshcd.h>
@@ -47,6 +46,44 @@ static const struct of_device_id ufs_mtk_of_match[] = {
{},
};
+/*
+ * Details of UIC Errors
+ */
+static const char *const ufs_uic_err_str[] = {
+ "PHY Adapter Layer",
+ "Data Link Layer",
+ "Network Link Layer",
+ "Transport Link Layer",
+ "DME"
+};
+
+static const char *const ufs_uic_pa_err_str[] = {
+ "PHY error on Lane 0",
+ "PHY error on Lane 1",
+ "PHY error on Lane 2",
+ "PHY error on Lane 3",
+ "Generic PHY Adapter Error. This should be the LINERESET indication"
+};
+
+static const char *const ufs_uic_dl_err_str[] = {
+ "NAC_RECEIVED",
+ "TCx_REPLAY_TIMER_EXPIRED",
+ "AFCx_REQUEST_TIMER_EXPIRED",
+ "FCx_PROTECTION_TIMER_EXPIRED",
+ "CRC_ERROR",
+ "RX_BUFFER_OVERFLOW",
+ "MAX_FRAME_LENGTH_EXCEEDED",
+ "WRONG_SEQUENCE_NUMBER",
+ "AFC_FRAME_SYNTAX_ERROR",
+ "NAC_FRAME_SYNTAX_ERROR",
+ "EOF_SYNTAX_ERROR",
+ "FRAME_SYNTAX_ERROR",
+ "BAD_CTRL_SYMBOL_TYPE",
+ "PA_INIT_ERROR",
+ "PA_ERROR_IND_RECEIVED",
+ "PA_INIT"
+};
+
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -598,6 +635,12 @@ static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
boost ? 0 : PM_QOS_DEFAULT_VALUE);
}
+static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
+{
+ ufs_mtk_boost_crypt(hba, scale_up);
+ ufs_mtk_boost_pm_qos(hba, scale_up);
+}
+
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -605,11 +648,11 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
if (on) {
phy_power_on(host->mphy);
ufs_mtk_setup_ref_clk(hba, on);
- ufs_mtk_boost_crypt(hba, on);
- ufs_mtk_boost_pm_qos(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
} else {
- ufs_mtk_boost_pm_qos(hba, on);
- ufs_mtk_boost_crypt(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
}
@@ -695,6 +738,46 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+/**
+ * ufs_mtk_init_clocks - Init mtk driver private clocks
+ *
+ * @hba: per adapter instance
+ */
+static void ufs_mtk_init_clocks(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki, *clki_tmp;
+
+ /*
+ * Find private clocks and store them in struct ufs_mtk_clk.
+ * Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
+ * being switched on/off in clock gating.
+ */
+ list_for_each_entry_safe(clki, clki_tmp, head, list) {
+ if (!strcmp(clki->name, "ufs_sel")) {
+ host->mclk.ufs_sel_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
+ host->mclk.ufs_sel_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
+ host->mclk.ufs_sel_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ }
+ }
+
+ if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
+ !mclk->ufs_sel_min_clki) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+ dev_info(hba->dev,
+ "%s: Clk-scaling not ready. Feature disabled.",
+ __func__);
+ }
+}
+
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
@@ -815,12 +898,18 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Enable clk scaling */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ ufs_mtk_init_clocks(hba);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -833,6 +922,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ /* Initialize pm-qos request */
+ cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ host->pm_qos_init = true;
+
goto out;
out_variant_clear:
@@ -1247,13 +1340,16 @@ fail:
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
- ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+ /* Dump ufshci register 0x140 ~ 0x14C */
+ ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
+ "XOUFS Ctrl (0x140): ");
ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+ /* Dump ufshci register 0x2200 ~ 0x22AC */
ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
- "MPHY Ctrl ");
+ "MPHY Ctrl (0x2200): ");
/* Direct debugging information to REG_MTK_PROBE */
ufs_mtk_dbg_sel(hba);
@@ -1310,8 +1406,101 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
enum ufs_event_type evt, void *data)
{
unsigned int val = *(u32 *)data;
+ unsigned long reg;
+ u8 bit;
trace_ufs_mtk_event(evt, val);
+
+ /* Print details of UIC Errors */
+ if (evt <= UFS_EVT_DME_ERR) {
+ dev_info(hba->dev,
+ "Host UIC Error Code (%s): %08x\n",
+ ufs_uic_err_str[evt], val);
+ reg = val;
+ }
+
+ if (evt == UFS_EVT_PA_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
+ }
+
+ if (evt == UFS_EVT_DL_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
+ }
+}
+
+static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data)
+{
+ /* Customize min gear in clk scaling */
+ hba->clk_scaling.min_gear = UFS_HS_G4;
+
+ hba->vps->devfreq_profile.polling_ms = 200;
+ hba->vps->ondemand_data.upthreshold = 50;
+ hba->vps->ondemand_data.downdifferential = 20;
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clock scaling by switching the parent of the
+ * ufs_sel mux. ufs_sel feeds ufs_ck, which drives the UFS hardware
+ * directly. The max and min clock rates of ufs_sel defined in the device
+ * tree should match the rates of "ufs_sel_max_src" and "ufs_sel_min_src"
+ * respectively. This avoids changing the rate of a PLL clock that is
+ * shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+ int ret = 0;
+
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+
+ if (scale_up) {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
+ clki->curr_freq = clki->max_freq;
+ } else {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
+ clki->curr_freq = clki->min_freq;
+ }
+
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ }
+
+ clk_disable_unprepare(clki->clk);
+
+ trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
+}
+
+static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ enum ufs_notify_change_status status)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return 0;
+
+ if (status == PRE_CHANGE) {
+ /* Switch parent before clk_set_rate() */
+ ufs_mtk_clk_scale(hba, scale_up);
+ } else {
+ /* Request interrupt latency QoS accordingly */
+ ufs_mtk_scale_perf(hba, scale_up);
+ }
+
+ return 0;
}
/*
@@ -1335,6 +1524,8 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.dbg_register_dump = ufs_mtk_dbg_register_dump,
.device_reset = ufs_mtk_device_reset,
.event_notify = ufs_mtk_event_notify,
+ .config_scaling_param = ufs_mtk_config_scaling_param,
+ .clk_scale_notify = ufs_mtk_clk_scale_notify,
};
/**
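The scaling path above deliberately avoids clk_set_rate() on a shared PLL: it reparents the ufs_sel mux between a fast and a slow source instead. A condensed sketch of that idiom, assuming the three clock handles are already valid (error paths trimmed):

#include <linux/clk.h>

/* Hedged sketch of reparenting a mux as the scaling mechanism. */
static int example_scale_by_reparent(struct clk *mux, struct clk *fast,
				     struct clk *slow, bool scale_up)
{
	int ret;

	/* The mux must be prepared/enabled while its parent changes. */
	ret = clk_prepare_enable(mux);
	if (ret)
		return ret;

	ret = clk_set_parent(mux, scale_up ? fast : slow);
	clk_disable_unprepare(mux);
	return ret;
}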
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index aa26d415527b..2fc6d7b87694 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -124,6 +124,12 @@ struct ufs_mtk_crypt_cfg {
int vcore_volt;
};
+struct ufs_mtk_clk {
+ struct ufs_clk_info *ufs_sel_clki; /* Mux */
+ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+};
+
struct ufs_mtk_hw_ver {
u8 step;
u8 minor;
@@ -139,6 +145,7 @@ struct ufs_mtk_host {
struct reset_control *crypto_reset;
struct ufs_hba *hba;
struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_clk mclk;
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
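The new mclk member keeps the mux and its two candidate parents together in the variant host data, so the scaling code can reach them without re-walking hba->clk_list_head. A hypothetical accessor, not from this patch:

static struct ufs_clk_info *example_get_sel_mux(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return host->mclk.ufs_sel_clki;	/* the reparentable mux */
}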
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 473fad83701e..8ad1415e10b6 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -846,7 +846,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_CRYPTO;
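The Qcom hunk opts into UFSHCD_CAP_WB_WITH_CLK_SCALING, the capability the core consults through ufshcd_enable_wb_if_scaling_up() in the ufshcd.c and ufs-sysfs.c hunks above. That helper's body is not part of this diff; the following is a hedged reconstruction consistent with how the call sites use it:

/* Hedged sketch: mirrors the call sites above, which treat the helper
 * as a pure capability test.
 */
static inline bool example_enable_wb_if_scaling_up(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
}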