-rw-r--r--  Documentation/ABI/testing/sysfs-driver-ufs | 14
-rw-r--r--  block/bsg-lib.c                             |  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c              |  6
-rw-r--r--  drivers/scsi/scsi_scan.c                    |  3
-rw-r--r--  drivers/ufs/core/ufs-mcq.c                  | 58
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c                | 73
-rw-r--r--  drivers/ufs/core/ufshcd.c                   | 41
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c             |  1
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h             |  3
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c               | 48
-rw-r--r--  include/ufs/ufs.h                           |  2
-rw-r--r--  include/ufs/ufshcd.h                        | 18
-rw-r--r--  include/ufs/ufshci.h                        |  1
13 files changed, 232 insertions, 39 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 5bf7073b4f75..fe943ce76c60 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -920,14 +920,16 @@ Description: This file shows whether the configuration descriptor is locked.
What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_number_of_rtt
What: /sys/bus/platform/devices/*.ufs/attributes/max_number_of_rtt
-Date: February 2018
-Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
+Date: May 2024
+Contact: Avri Altman <avri.altman@wdc.com>
Description: This file provides the maximum current number of
- outstanding RTTs in device that is allowed. The full
- information about the attribute could be found at
- UFS specifications 2.1.
+		outstanding RTTs in the device that is allowed. bMaxNumOfRTT is a
+		read-write persistent attribute and is equal to two after device
+		manufacturing. It shall not be set to a value greater than the
+		bDeviceRTTCap value, and it may be set only when the hardware
+		queues are empty.
- The file is read only.
+		The file is read/write.
What: /sys/bus/platform/drivers/ufshcd/*/attributes/exception_event_control
What: /sys/bus/platform/devices/*.ufs/attributes/exception_event_control
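
A minimal userspace sketch of the now-writable attribute may help readers of the ABI change; it is not part of the patch, and "host0.ufs" is only a placeholder for the wildcard in the sysfs paths above.

/*
 * Hedged sketch, not part of the patch: read bMaxNumOfRTT and write it back.
 * "host0.ufs" stands in for the actual platform device name.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/platform/devices/host0.ufs/attributes/max_number_of_rtt";
	unsigned int rtt;
	FILE *f;

	f = fopen(path, "r");
	if (!f || fscanf(f, "%x", &rtt) != 1)	/* show() emits "0x%08X\n" */
		return 1;
	fclose(f);
	printf("bMaxNumOfRTT = %u\n", rtt);

	/* Writes must not exceed bDeviceRTTCap and require empty HW queues. */
	f = fopen(path, "w");
	if (!f || fprintf(f, "%u\n", rtt) < 0)
		return 1;
	return fclose(f) ? 1 : 0;
}
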
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index ee738d129a9f..32da4a4429ce 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -385,13 +385,12 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
- q = blk_mq_alloc_queue(set, lim, NULL);
+ q = blk_mq_alloc_queue(set, lim, dev);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
}
- q->queuedata = dev;
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d48007e18288..fe98c76e9be3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3014,12 +3014,6 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
}
}
-struct scsi_dif_tuple {
- __be16 guard; /* Checksum */
- __be16 app_tag; /* APPL identifier */
- __be32 ref_tag; /* Target LBA or indirect LBA */
-};
-
/*
* Checks the guard or meta-data for the type of error
* detected by the HBA. In case of errors, we set the
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 8300fc28cb10..c0b72199b4fa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -334,7 +334,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
scsi_init_limits(shost, &lim);
- q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, NULL);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@@ -344,7 +344,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
}
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
- q->queuedata = sdev;
depth = sdev->host->cmd_per_lun ?: 1;
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 005d63ab1f44..4bcae410c268 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define QCFGPTR GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
@@ -25,7 +26,6 @@
#define QUEUE_ID_OFFSET 16
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
-#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
@@ -118,6 +118,19 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
}
/**
+ * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
+ * Registers.
+ * @hba: per adapter instance
+ *
+ * Return: Start address of MCQ Queue Config Registers in HCI
+ */
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
+{
+ return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
+
+/**
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
*
@@ -166,6 +179,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+ /*
+ * Device should support at least one I/O queue to handle device
+ * commands via hba->dev_cmd_queue.
+ */
+ if (hba_maxq == poll_queues) {
+ dev_err(hba->dev, "At least one non-poll queue required\n");
+ return -EOPNOTSUPP;
+ }
+
rem = hba_maxq;
if (rw_queues) {
@@ -228,12 +250,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
return 0;
}
-
-/* Operation and runtime registers configuration */
-#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
-#define MCQ_OPR_OFFSET_n(p, i) \
- (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
-
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
{
@@ -338,29 +354,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Submission Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQLBA, i));
/* Submission Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
- MCQ_CFG_n(REG_SQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
- MCQ_CFG_n(REG_SQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
+ ufshcd_mcq_cfg_offset(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
- MCQ_CFG_n(REG_SQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
+ ufshcd_mcq_cfg_offset(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQLBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQLBA, i));
/* Completion Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
- MCQ_CFG_n(REG_CQUBA, i));
+ ufshcd_mcq_cfg_offset(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
- MCQ_CFG_n(REG_CQDAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
+ ufshcd_mcq_cfg_offset(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
- MCQ_CFG_n(REG_CQISAO, i));
+ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
+ ufshcd_mcq_cfg_offset(REG_CQISAO, i));
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@@ -377,7 +393,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Completion Queue Enable|Size to Completion Queue Attribute */
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
- MCQ_CFG_n(REG_CQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_CQATTR, i));
/*
	 * Submission Queue Enable|Size|Completion Queue ID to
@@ -385,7 +401,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
*/
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
(i << QUEUE_ID_OFFSET),
- MCQ_CFG_n(REG_SQATTR, i));
+ ufshcd_mcq_cfg_offset(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
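
For readers following the macro-to-helper conversion above, here is a minimal sketch (an assumption for illustration, not code from the patch) of how the new helpers compose when the MCQ Queue Config Registers sit at QCFGPTR * 0x200 from the HCI register base, as the QEMU variant later in this diff sets up.

#include <ufs/ufshcd.h>
#include <ufs/ufshci.h>

/*
 * Hedged sketch, not part of the patch: absolute MMIO address of queue i's
 * SQATTR register for a host whose mcq_base is
 * mmio_base + ufshcd_mcq_queue_cfg_addr(), as in ufs_qemu_mcq_config_resource().
 */
static void __iomem *example_sqattr_addr(struct ufs_hba *hba, int i)
{
	/* Start of the MCQ Queue Config Registers: QCFGPTR * 0x200 */
	void __iomem *cfg = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);

	/* Each queue's config block is MCQ_QCFG_SIZE (0x40) bytes apart */
	return cfg + ufshcd_mcq_cfg_offset(REG_SQATTR, i);
}
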
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3d049967f6bc..e80a32421a8c 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -1340,6 +1340,78 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static ssize_t max_number_of_rtt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 rtt;
+ int ret;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+ ufshcd_rpm_put_sync(hba);
+
+ if (ret)
+ goto out;
+
+ ret = sysfs_emit(buf, "0x%08X\n", rtt);
+
+out:
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t max_number_of_rtt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ struct scsi_device *sdev;
+ unsigned int rtt;
+ int ret;
+
+ if (kstrtouint(buf, 0, &rtt))
+ return -EINVAL;
+
+ if (rtt > dev_info->rtt_cap) {
+ dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
+ return -EINVAL;
+ }
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ shost_for_each_device(sdev, hba->host)
+ blk_mq_freeze_queue(sdev->request_queue);
+
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
+
+ shost_for_each_device(sdev, hba->host)
+ blk_mq_unfreeze_queue(sdev->request_queue);
+
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(max_number_of_rtt);
+
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
@@ -1387,7 +1459,6 @@ UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
-UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0cf07194bbe8..41bf2e249c83 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -102,6 +102,9 @@
/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+/* bMaxNumOfRTT is equal to two after device manufacturing */
+#define DEFAULT_MAX_NUM_RTT 2
+
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
@@ -2405,6 +2408,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
hba->reserved_slot = hba->nutrs - 1;
+ hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
+
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
if (err) {
@@ -8121,6 +8126,38 @@ out:
dev_info->b_ext_iid_en = ext_iid_en;
}
+static void ufshcd_set_rtt(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 rtt = 0;
+ u32 dev_rtt = 0;
+ int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
+ hba->vops->max_num_rtt : hba->nortt;
+
+ /* RTT override makes sense only for UFS-4.0 and above */
+ if (dev_info->wspecversion < 0x400)
+ return;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
+ dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
+ return;
+ }
+
+ /* do not override if it was already written */
+ if (dev_rtt != DEFAULT_MAX_NUM_RTT)
+ return;
+
+ rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
+
+ if (rtt == dev_rtt)
+ return;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
+ dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
+}
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@@ -8256,6 +8293,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
+ dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -8508,6 +8547,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
+ ufshcd_set_rtt(hba);
+
ufshcd_get_ref_clk_gating_wait(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
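
The new vops field gives host drivers a way to cap the negotiated RTT below what the controller advertises; a minimal sketch of a hypothetical variant driver (mirroring the MediaTek change below, not taken from the patch):

/*
 * Hedged sketch, not part of the patch: a hypothetical variant driver caps
 * the host side of the negotiation; ufshcd_set_rtt() then uses
 * min(bDeviceRTTCap, max_num_rtt), and only overrides bMaxNumOfRTT while it
 * is still at its manufacturing default of two.
 */
static const struct ufs_hba_variant_ops example_hba_vops = {
	.name		= "example-ufs",
	.max_num_rtt	= 2,
};
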
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index c4f997196c57..c7a0ab9b1f59 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1785,6 +1785,7 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba)
*/
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
+ .max_num_rtt = MTK_MAX_NUM_RTT,
.init = ufs_mtk_init,
.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
.setup_clocks = ufs_mtk_setup_clocks,
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 3ff17e95afab..05d76a6bd772 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -189,4 +189,7 @@ struct ufs_mtk_host {
/* MTK delay of autosuspend: 500 ms */
#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500
+/* MTK RTT support number */
+#define MTK_MAX_NUM_RTT 2
+
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 0aca666d2199..ba8af9c0e77f 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -20,6 +20,8 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
+#define MAX_SUPP_MAC 64
+
struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
@@ -446,6 +448,49 @@ static int ufs_intel_mtl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
+static int ufs_qemu_get_hba_mac(struct ufs_hba *hba)
+{
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba)
+{
+ hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);
+
+ return 0;
+}
+
+static int ufs_qemu_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0));
+ u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0));
+ u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0));
+ u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0));
+
+ hba->mcq_opr[OPR_SQD].offset = sqdao;
+ hba->mcq_opr[OPR_SQIS].offset = sqisao;
+ hba->mcq_opr[OPR_CQD].offset = cqdao;
+ hba->mcq_opr[OPR_CQIS].offset = cqisao;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->stride = 48;
+ opr->base = hba->mmio_base + opr->offset;
+ }
+
+ return 0;
+}
+
+static struct ufs_hba_variant_ops ufs_qemu_hba_vops = {
+ .name = "qemu-pci",
+ .get_hba_mac = ufs_qemu_get_hba_mac,
+ .mcq_config_resource = ufs_qemu_mcq_config_resource,
+ .op_runtime_config = ufs_qemu_op_runtime_config,
+};
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -591,7 +636,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
- { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ (kernel_ulong_t)&ufs_qemu_hba_vops },
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index b6003749bc83..853e95957c31 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -592,6 +592,8 @@ struct ufs_dev_info {
enum ufs_rtc_time rtc_type;
time64_t rtc_time_baseline;
u32 rtc_update_period;
+
+ u8 rtt_cap; /* bDeviceRTTCap */
};
/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index bad88bd91995..9e0581115b34 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -295,6 +295,7 @@ struct ufs_pwr_mode_info {
/**
* struct ufs_hba_variant_ops - variant specific callbacks
* @name: variant name
+ * @max_num_rtt: maximum RTT supported by the host
* @init: called when the driver is initialized
* @exit: called to cleanup everything done in init
* @get_ufs_hci_version: called to get UFS HCI version
@@ -332,6 +333,7 @@ struct ufs_pwr_mode_info {
*/
struct ufs_hba_variant_ops {
const char *name;
+ int max_num_rtt;
int (*init)(struct ufs_hba *);
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
@@ -819,6 +821,7 @@ enum ufshcd_mcq_opr {
* @capabilities: UFS Controller Capabilities
* @mcq_capabilities: UFS Multi Circular Queue capabilities
* @nutrs: Transfer Request Queue depth supported by controller
+ * @nortt: Max outstanding RTTs supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
@@ -957,6 +960,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
+ int nortt;
u32 mcq_capabilities;
int nutmrs;
u32 reserved_slot;
@@ -1126,11 +1130,24 @@ struct ufs_hw_queue {
struct mutex sq_mutex;
};
+#define MCQ_QCFG_SIZE 0x40
+
static inline bool is_mcq_enabled(struct ufs_hba *hba)
{
return hba->mcq_enabled;
}
+static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr opr, int idx)
+{
+ return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
+}
+
+static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx)
+{
+ return reg + MCQ_QCFG_SIZE * idx;
+}
+
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
@@ -1261,6 +1278,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 385e1c6b8d60..c50f92bf2e1d 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -68,6 +68,7 @@ enum {
/* Controller capability masks */
enum {
MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00,
MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
MASK_EHSLUTRD_SUPPORTED = 0x00400000,
MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,