Diffstat (limited to 'drivers/phy/qualcomm')
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs-i.h         |  5
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c  | 29
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c  | 29
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs.c           | 42
4 files changed, 71 insertions(+), 34 deletions(-)
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 13b02b7de30b..822c83b8efcd 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -114,14 +114,16 @@ struct ufs_qcom_phy {
struct ufs_qcom_phy_calibration *cached_regs;
int cached_regs_table_size;
bool is_powered_on;
+ bool is_started;
struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+
+ enum phy_mode mode;
};
/**
* struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
* specific implementation per phy. Each UFS phy, should implement
* those functions according to its spec and requirements
- * @calibrate_phy: pointer to a function that calibrate the phy
* @start_serdes: pointer to a function that starts the serdes
* @is_physical_coding_sublayer_ready: pointer to a function that
* checks pcs readiness. returns 0 for success and non-zero for error.
@@ -130,7 +132,6 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
*/
struct ufs_qcom_phy_specific_ops {
- int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
void (*start_serdes)(struct ufs_qcom_phy *phy);
int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
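For orientation, this is roughly how the common header reads once the hunk above is applied; unrelated members are elided and trailing ops entries abbreviated, so treat it as a sketch of the resulting layout rather than the exact file contents:

struct ufs_qcom_phy {
	/* ... other members unchanged ... */
	bool is_powered_on;
	bool is_started;		/* serdes started on first power_on */
	struct ufs_qcom_phy_specific_ops *phy_spec_ops;

	enum phy_mode mode;		/* recorded by the new .set_mode op */
};

struct ufs_qcom_phy_specific_ops {
	/* calibrate_phy is dropped; each phy now calibrates itself in .init */
	void (*start_serdes)(struct ufs_qcom_phy *phy);
	int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
	void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
	/* ... remaining ops unchanged ... */
};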
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
index 12a1b498dc4b..ba1895b76a5d 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
@@ -44,7 +44,19 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_14nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
@@ -53,6 +65,19 @@ static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
@@ -102,11 +127,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_14nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
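Taken together, the 14nm hunks move the rate-dependent calibration into the generic .init callback, keyed off the mode recorded by .set_mode, while serdes start and the PCS-ready check are deferred to the first power-on (the 20nm file below mirrors the same change). A minimal consumer-side sketch of the intended call order, assuming a struct phy obtained earlier via devm_phy_get() or similar; the helper name and error handling are illustrative and not part of the patch:

#include <linux/phy/phy.h>

/* Illustrative bring-up flow in a UFS host controller driver. */
static int example_bring_up_ufs_phy(struct phy *phy)
{
	int err;

	/* Tell the PHY which UFS rate to calibrate for before init. */
	err = phy_set_mode(phy, PHY_MODE_UFS_HS_B);
	if (err)
		return err;

	/* .init now performs the rate-dependent calibration. */
	err = phy_init(phy);
	if (err)
		return err;

	/* .power_on starts the serdes and polls PCS readiness on first use. */
	err = phy_power_on(phy);
	if (err)
		phy_exit(phy);

	return err;
}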
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
index 4f68acb58b73..49f435c71147 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
@@ -63,7 +63,19 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_20nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
@@ -72,6 +84,19 @@ static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
bool hibern8_exit_after_pwr_collapse = phy->quirks &
@@ -160,11 +185,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_20nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index 43865ef340e2..c5ff4525edef 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -518,9 +518,8 @@ void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
}
}
-int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
+static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
@@ -533,7 +532,6 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
@@ -564,31 +562,8 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
-int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ret = ufs_qcom_phy->phy_spec_ops->
- calibrate_phy(ufs_qcom_phy, is_rate_B);
- if (ret)
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
- __func__, ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-
-int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
+static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
__func__);
@@ -598,7 +573,6 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
return ufs_qcom_phy->phy_spec_ops->
is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
@@ -609,6 +583,18 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
if (phy_common->is_powered_on)
return 0;
+ if (!phy_common->is_started) {
+ err = ufs_qcom_phy_start_serdes(phy_common);
+ if (err)
+ return err;
+
+ err = ufs_qcom_phy_is_pcs_ready(phy_common);
+ if (err)
+ return err;
+
+ phy_common->is_started = true;
+ }
+
err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",