Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/amdgpu_smu.c')
-rw-r--r--	drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 555
1 file changed, 443 insertions(+), 112 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index eec329ab6037..0685a3388e38 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -20,9 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
+#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
@@ -30,6 +30,246 @@
#include "atom.h"
#include "amd_pcie.h"
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+{
+ int ret = 0;
+
+ if (!if_version && !smu_version)
+ return -EINVAL;
+
+ if (if_version) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, if_version);
+ if (ret)
+ return ret;
+ }
+
+ if (smu_version) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, smu_version);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
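+/*
+ * The soft min/max messages encode the clock id in the upper 16 bits of
+ * the parameter and the target frequency (MHz) in the lower 16 bits.
+ */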
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
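+/*
+ * Same parameter encoding as the soft limits above, but the hard min/max
+ * messages pin the clock range rather than hinting it.
+ */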
+int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
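+/*
+ * When DPM is disabled for the clock, fall back to the vbios boot value;
+ * otherwise ask the firmware for the min/max DPM frequencies.
+ */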
+int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!min && !max)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* boot values are in 10 kHz units; report the limit in MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ mutex_lock(&smu->mutex);
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
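+/*
+ * Look up the frequency of a single DPM level; the level index goes in
+ * the low 16 bits of the parameter, the clock id in the high 16 bits.
+ */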
+int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint16_t level, uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, &param);
+ if (ret)
+ return ret;
+
+	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM
+	 * discrete DPM is not supported for now, so mask the flag off
+	 */
+ *value = param & 0x7fffffff;
+
+ return ret;
+}
+
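+/* Level index 0xff asks the firmware for the number of DPM levels. */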
+int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+}
+
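+/*
+ * Map a clock type onto its DPM feature bit; clock types without a
+ * mapping are treated as always enabled.
+ */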
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ default:
+ return true;
+ }
+
+	if (!smu_feature_is_enabled(smu, feature_id)) {
+		pr_warn("clk type %d: dpm feature %d is not enabled\n", clk_type, feature_id);
+ return false;
+ }
+
+ return true;
+}
+
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -42,6 +282,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
case AMD_IP_BLOCK_TYPE_VCE:
ret = smu_dpm_set_vce_enable(smu, gate);
break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = smu_gfx_off_control(smu, gate);
+ break;
default:
break;
}
@@ -63,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
	/* power states are not supported; report a single default state */
memset(state_info, 0, sizeof(struct pp_states_info));
- state_info->nums = 0;
+ state_info->nums = 1;
+ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
return 0;
}
@@ -86,6 +330,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
*size = 8;
break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -97,20 +353,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
return ret;
}
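+/*
+ * Transfer a driver table to/from SMU memory; the message parameter packs
+ * the caller's argument into the high 16 bits and the firmware table id
+ * into the low 16 bits.
+ */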
-int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
int ret = 0;
- uint32_t table_index;
+ int table_id = smu_table_get_index(smu, table_index);
if (!table_data || table_id >= smu_table->table_count)
return -EINVAL;
- table_index = (exarg << 16) | table_id;
-
- table = &smu_table->tables[table_id];
+ table = &smu_table->tables[table_index];
if (drv2smu)
memcpy(table->cpu_addr, table_data, table->size);
@@ -126,7 +380,7 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_index);
+ table_id | ((argument & 0xFFFF) << 16));
if (ret)
return ret;
@@ -138,13 +392,12 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
bool is_support_sw_smu(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
- return false;
-
- if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+ if (adev->asic_type == CHIP_VEGA20)
+		return amdgpu_dpm == 2;
+ else if (adev->asic_type >= CHIP_NAVI10)
return true;
-
- return false;
+ else
+ return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
@@ -168,6 +421,8 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
+ if (!smu->pm_enabled)
+ return -EINVAL;
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -201,31 +456,36 @@ int smu_feature_init_dpm(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
- uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+ uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
+ if (!smu->pm_enabled)
+ return ret;
mutex_lock(&feature->mutex);
- bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
- ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+ ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
SMU_FEATURE_MAX/32);
if (ret)
return ret;
mutex_lock(&feature->mutex);
- bitmap_andnot(feature->allowed, feature->allowed,
- (unsigned long *)unallowed_feature_mask,
+ bitmap_or(feature->allowed, feature->allowed,
+ (unsigned long *)allowed_feature_mask,
feature->feature_num);
mutex_unlock(&feature->mutex);
return ret;
}
-int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -235,11 +495,15 @@ int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
return ret;
}
-int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
+ bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -258,11 +522,14 @@ failed:
return ret;
}
-int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -272,12 +539,16 @@ int smu_feature_is_supported(struct smu_context *smu, int feature_id)
return ret;
}
-int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+int smu_feature_set_supported(struct smu_context *smu,
+ enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -296,7 +567,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ case CHIP_NAVI10:
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
smu_v11_0_set_smu_funcs(smu);
@@ -314,6 +585,7 @@ static int smu_early_init(void *handle)
struct smu_context *smu = &adev->smu;
smu->adev = adev;
+ smu->pm_enabled = !!amdgpu_dpm;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -323,6 +595,9 @@ static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+
+ if (!smu->pm_enabled)
+ return 0;
mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
@@ -406,15 +681,17 @@ static int smu_sw_init(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
mutex_init(&smu->smu_feature.mutex);
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+
+ mutex_init(&smu->smu_baco.mutex);
+ smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.platform_support = false;
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -451,6 +728,12 @@ static int smu_sw_init(void *handle)
return ret;
}
+ ret = smu_register_irq_handler(smu);
+ if (ret) {
+ pr_err("Failed to register smc irq handler!\n");
+ return ret;
+ }
+
return 0;
}
@@ -460,8 +743,8 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
+ kfree(smu->irq_source);
+ smu->irq_source = NULL;
ret = smu_smc_table_sw_fini(smu);
if (ret) {
@@ -590,17 +873,17 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return 0;
}
- ret = smu_init_display(smu);
+ ret = smu_init_display_count(smu, 0);
if (ret)
return ret;
if (initialize) {
- ret = smu_read_pptable_from_vbios(smu);
+ /* get boot_values from vbios to set revision, gfxclk, and etc. */
+ ret = smu_get_vbios_bootup_values(smu);
if (ret)
return ret;
- /* get boot_values from vbios to set revision, gfxclk, and etc. */
- ret = smu_get_vbios_bootup_values(smu);
+ ret = smu_setup_pptable(smu);
if (ret)
return ret;
@@ -612,10 +895,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
* check if the format_revision in vbios is up to pptable header
* version, and the structure size is not 0.
*/
- ret = smu_get_clk_info_from_vbios(smu);
- if (ret)
- return ret;
-
ret = smu_check_pptable(smu);
if (ret)
return ret;
@@ -697,7 +976,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
- ret = smu_set_od8_default_settings(smu, initialize);
+ ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -716,6 +995,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
*/
ret = smu_set_tool_table_location(smu);
+ if (!smu_is_dpm_running(smu))
+ pr_info("dpm has been disabled\n");
+
return ret;
}
@@ -788,23 +1070,14 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- ret = smu_load_microcode(smu);
- if (ret)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
return ret;
+ }
}
- ret = smu_check_fw_status(smu);
- if (ret) {
- pr_err("SMC firmware status is not correct\n");
- return ret;
- }
-
- mutex_lock(&smu->mutex);
-
ret = smu_feature_init_dpm(smu);
if (ret)
goto failed;
@@ -829,16 +1102,16 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
-
- adev->pm.dpm_enabled = true;
+ if (!smu->pm_enabled)
+ adev->pm.dpm_enabled = false;
+ else
+		adev->pm.dpm_enabled = true; /* TODO: only set dpm_enabled once VCN and DAL DPM are workable */
pr_info("SMU is initialized successfully!\n");
return 0;
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -849,30 +1122,15 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
kfree(table_context->max_sustainable_clocks);
table_context->max_sustainable_clocks = NULL;
- kfree(table_context->od_feature_capabilities);
- table_context->od_feature_capabilities = NULL;
-
- kfree(table_context->od_settings_max);
- table_context->od_settings_max = NULL;
-
- kfree(table_context->od_settings_min);
- table_context->od_settings_min = NULL;
-
kfree(table_context->overdrive_table);
table_context->overdrive_table = NULL;
- kfree(table_context->od8_settings);
- table_context->od8_settings = NULL;
-
ret = smu_fini_fb_allocations(smu);
if (ret)
return ret;
@@ -905,16 +1163,26 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
-
- if (!is_support_sw_smu(adev))
- return -EINVAL;
+ bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
return ret;
+ if (adev->in_gpu_reset && baco_feature_is_enabled) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
+ if (adev->asic_type >= CHIP_NAVI10 &&
+ adev->gfx.rlc.funcs->stop)
+ adev->gfx.rlc.funcs->stop(adev);
+
return 0;
}
@@ -924,9 +1192,6 @@ static int smu_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
pr_info("SMU is resuming...\n");
mutex_lock(&smu->mutex);
@@ -955,7 +1220,7 @@ int smu_display_configuration_change(struct smu_context *smu,
int index = 0;
int num_of_active_display = 0;
- if (!is_support_sw_smu(smu->adev))
+ if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
return -EINVAL;
if (!display_config)
@@ -1083,7 +1348,7 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1116,16 +1381,54 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
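+/*
+ * Generic fallback used when the ASIC-specific set_performance_level
+ * handler fails (see smu_adjust_power_state_dynamic below).
+ */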
+static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
{
int ret = 0;
int index = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ if (!smu->pm_enabled)
+ return -EINVAL;
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1134,6 +1437,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
ret = smu_apply_clocks_adjust_rules(smu);
if (ret) {
pr_err("Failed to apply clocks adjust rules!");
@@ -1149,38 +1454,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != level) {
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
-
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = smu_unforce_dpm_levels(smu);
- break;
-
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
- break;
-
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
+ ret = smu_asic_set_performance_level(smu, level);
+ if (ret) {
+ ret = smu_default_set_performance_level(smu, level);
}
-
if (!ret)
smu_dpm_ctx->dpm_level = level;
}
@@ -1224,6 +1501,60 @@ int smu_handle_task(struct smu_context *smu,
return ret;
}
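+/* Return the current forced performance level under the smu mutex. */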
+enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ enum amd_dpm_forced_level level;
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ level = smu_dpm_ctx->dpm_level;
+ mutex_unlock(&(smu->mutex));
+
+ return level;
+}
+
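+/*
+ * Force a performance level: route the request through the SMC ip block's
+ * enable_umd_pstate hook, then readjust the power state.
+ */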
+int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+ if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+ break;
+	}
+
+	if (i == smu->adev->num_ip_blocks)
+		return -EINVAL;
+
+ smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+ if (ret)
+ return ret;
+
+ mutex_lock(&smu->mutex);
+ smu_dpm_ctx->dpm_level = level;
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
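+/* Mutex-protected wrapper around smu_init_display_count. */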
+int smu_set_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+ ret = smu_init_display_count(smu, count);
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,