author     Alex Deucher <alexander.deucher@amd.com>  2014-11-18 14:40:26 -0500
committer  Alex Deucher <alexander.deucher@amd.com>  2014-11-20 13:00:13 -0500
commit     7f6233ca8769a92cf4f23a0bc18bf241e9c50606 (patch)
tree       905459b31a05de63ea526e1ff13ac985f4cacc45 /drivers/gpu/drm/radeon/ci_dpm.c
parent     drm/radeon/ci: use different smc command for pcie dpm (diff)
drm/radeon/ci: force pcie level before sclk and mclk
Preferred ordering.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
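For context, the sketch below shows the resulting ordering in the RADEON_DPM_FORCED_LEVEL_HIGH branch of ci_dpm_force_performance_level() after this patch. It is an illustrative outline only: the level counting, error handling, and usec_timeout polling loops are elided, and the helper, register, and mask names are simply those that appear in the hunk below.

/*
 * Condensed sketch (illustrative, not the exact code): post-patch
 * ordering for RADEON_DPM_FORCED_LEVEL_HIGH. "levels" stands for the
 * index of the highest set bit in the corresponding *_dpm_enable_mask,
 * computed in the hunk with: while (tmp >>= 1) levels++;
 */
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
	/* 1. pcie is forced first */
	if (!pi->pcie_dpm_key_disabled &&
	    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
		ret = ci_dpm_force_state_pcie(rdev, level);
		/* then poll CURR_PCIE_INDEX in TARGET_AND_CURRENT_PROFILE_INDEX_1 */
	}
	/* 2. then sclk */
	if (!pi->sclk_dpm_key_disabled &&
	    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
		ret = ci_dpm_force_state_sclk(rdev, levels);
		/* then poll CURR_SCLK_INDEX in TARGET_AND_CURRENT_PROFILE_INDEX */
	}
	/* 3. finally mclk */
	if (!pi->mclk_dpm_key_disabled &&
	    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
		ret = ci_dpm_force_state_mclk(rdev, levels);
		/* then poll CURR_MCLK_INDEX in TARGET_AND_CURRENT_PROFILE_INDEX */
	}
}

Note that the pcie case passes level rather than levels, which matches the hunk as committed.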
Diffstat (limited to 'drivers/gpu/drm/radeon/ci_dpm.c')
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 34
1 file changed, 17 insertions, 17 deletions
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 73f8c4b5bc9c..630434cba22d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4143,57 +4143,57 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
int ret;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_sclk(rdev, levels);
+ ret = ci_dpm_force_state_pcie(rdev, level);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_mclk(rdev, levels);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
- CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
}
}
}
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
- ret = ci_dpm_force_state_pcie(rdev, level);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
- CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);