Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 45
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1144
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 525
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c | 195
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c | 196
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c | 222
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c | 91
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c | 231
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 72
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 41
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 371
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h | 51
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 134
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 41
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 555
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 483
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_internal.h | 204
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 370
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 153
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 138
44 files changed, 4581 insertions, 978 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa8ad7db2b3a..7932eb163a00 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle,
workload = hwmgr->workload_setting[index];
}
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+ hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
+ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+ }
+
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
mutex_unlock(&hwmgr->smu_lock);
@@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return ret;
}
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
+ .set_xgmi_pstate = pp_set_xgmi_pstate,
};
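
The two new pp_dpm wrappers above are exported through amd_pm_funcs. Below is a minimal sketch, not part of the patch, of how an amdgpu-side caller might reach them; the adev->powerplay.pp_funcs/pp_handle layout follows the existing pp_dpm_* usage pattern, and DF_CSTATE_DISALLOW is used purely as an illustrative enum value.

static int example_disallow_df_cstate(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	void *handle = adev->powerplay.pp_handle;

	/* Both new hooks are optional; absence is not an error. */
	if (!funcs || !funcs->set_df_cstate)
		return 0;

	return funcs->set_df_cstate(handle, DF_CSTATE_DISALLOW);
}
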
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 33960fb38a5d..40b546c75fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -25,11 +25,16 @@
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
+#include "navi10_ppt.h"
+#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
@@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto failed;
@@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
}
failed:
+ mutex_unlock(&smu->mutex);
+
return size;
}
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+ feature_low = (feature_mask >> 0 ) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
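
The smu_feature_update_enable_state() helper, now kept static in this file, programs a 64-bit feature mask through two 32-bit SMC message parameters. A standalone sketch of that split (the helper name below is illustrative only):

static void example_split_feature_mask(uint64_t feature_mask,
				       uint32_t *feature_low,
				       uint32_t *feature_high)
{
	/* Low word is sent with SMU_MSG_{Enable,Disable}SmuFeaturesLow ... */
	*feature_low  = feature_mask & 0xffffffff;
	/* ... high word with SMU_MSG_{Enable,Disable}SmuFeaturesHigh. */
	*feature_high = (feature_mask >> 32) & 0xffffffff;
	/* e.g. a mask with only bit 35 set lands in bit 3 of the high word. */
}
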
@@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
- return ret;
+ goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
@@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret)
- return ret;
+ goto out;
}
if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret)
- return ret;
+ goto out;
}
+out:
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max)
+ uint32_t *min, uint32_t *max, bool lock_needed)
{
uint32_t clock_limit;
int ret = 0;
@@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
@@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
-
- return 0;
+ } else {
+ /*
+ * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
+ * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
- /*
- * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
- * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
- */
- ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
return true;
}
-
+/**
+ * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+ *
+ * @smu: smu_context pointer
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ *
+ * This API is called without holding smu->mutex because:
+ * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and those
+ * callers already guarantee the call is free of races.
+ * 2. Or it is called on a user request to change
+ * power_dpm_force_performance_level, in which case smu->mutex is already
+ * held by the parent API smu_force_performance_level in the call path.
+ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
-{
- /* not support power state */
- return POWER_STATE_TYPE_DEFAULT;
-}
-
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count || table_id < 0)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
return ret;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
+ if (!is_support_sw_smu(adev))
return false;
if (adev->asic_type == CHIP_VEGA20)
@@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- return smu_table->power_play_table_size;
+ powerplay_table_size = smu_table->power_play_table_size;
+
+ mutex_unlock(&smu->mutex);
+
+ return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
@@ -531,13 +590,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
- mutex_unlock(&smu->mutex);
+
+ /*
+ * A special hw_fini action (for Navi1x, the DPM disablement will be
+ * skipped) may be needed for custom pptable uploading.
+ */
+ smu->uploading_custom_pp_table = true;
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
- return ret;
+ smu->uploading_custom_pp_table = false;
failed:
mutex_unlock(&smu->mutex);
@@ -569,41 +633,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- feature_low = (feature_mask >> 0 ) & 0xffffffff;
- feature_high = (feature_mask >> 32) & 0xffffffff;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
-
- return ret;
-}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
@@ -633,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- uint64_t feature_mask = 0;
- int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -642,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
WARN_ON(feature_id > feature->feature_num);
- feature_mask = 1ULL << feature_id;
-
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_mask, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
@@ -707,20 +722,27 @@ static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ vega20_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ navi10_set_ppt_funcs(smu);
+ break;
case CHIP_ARCTURUS:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v11_0_set_smu_funcs(smu);
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+ smu->od_enabled = false;
break;
case CHIP_RENOIR:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v12_0_set_smu_funcs(smu);
+ renoir_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
@@ -736,6 +758,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -749,11 +772,10 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT);
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_COMPLETE_INIT,
+ false);
return 0;
}
@@ -843,6 +865,8 @@ static int smu_sw_init(void *handle)
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
+ mutex_init(&smu->sensor_lock);
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -917,14 +941,9 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
+ int ret, i;
- if (table_count <= 0)
- return -EINVAL;
-
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
ret = amdgpu_bo_create_kernel(adev,
@@ -940,7 +959,7 @@ static int smu_init_fb_allocations(struct smu_context *smu)
return 0;
failed:
- for (; i > 0; i--) {
+ while (--i >= 0) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -955,13 +974,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
uint32_t i = 0;
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -972,50 +990,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
-}
-
static int smu_smc_table_hw_init(struct smu_context *smu,
bool initialize)
{
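
The smu_override_pcie_parameters() helper removed here is not dropped outright; a later hunk still calls it, so the implementation presumably now lives behind the new smu_internal.h wrappers. For reference, a sketch of the SMU_MSG_OverridePcieParameters argument layout described by the comment in the removed code (the helper name and example values below are illustrative only):

/*
 * Bit 31:16 - LCLK DPM level (0 is DPM0, 1 is DPM1)
 * Bit 15:8  - PCIe gen (0 to 3 corresponds to GEN1 to GEN4)
 * Bit 7:0   - PCIe lane width code (1 to 7 corresponds to x1 to x32)
 */
static inline uint32_t pack_pcie_override_arg(uint32_t lclk_dpm_level,
					      uint32_t pcie_gen,
					      uint32_t pcie_width)
{
	return (lclk_dpm_level << 16) | (pcie_gen << 8) | pcie_width;
}

/* Example: DPM1, GEN4-capable (pcie_gen = 3), x16 (width code 6) -> 0x00010306 */
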
@@ -1090,8 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
if (ret)
return ret;
@@ -1104,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1136,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -1145,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -1224,29 +1200,46 @@ static int smu_free_memory_pool(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_start_smc_engine(struct smu_context *smu)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) {
- ret = smu_load_microcode(smu);
- if (ret)
- return ret;
+ if (smu->ppt_funcs->load_microcode) {
+ ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret)
+ return ret;
+ }
}
}
- ret = smu_check_fw_status(smu);
+ if (smu->ppt_funcs->check_fw_status) {
+ ret = smu->ppt_funcs->check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+ }
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ ret = smu_start_smc_engine(smu);
if (ret) {
- pr_err("SMC firmware status is not correct\n");
+ pr_err("SMU is not ready yet!\n");
return ret;
}
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
if (!smu->pm_enabled)
@@ -1289,6 +1282,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1301,6 +1299,33 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+ pr_warn("Fail to stop thermal control!\n");
+ return ret;
+ }
+
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs.
+ * - The gfx related features are under the control of
+ * the RLC on those ASICs. RLC reinitialization would be
+ * needed to re-enable them, which would cost much
+ * more effort.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
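
The guard around smu_stop_dpms() above stacks two negations. As a readability sketch only (behaviour unchanged, helper name illustrative), the skip condition can be expressed as:

static bool example_skip_dpm_disablement(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	bool is_navi1x = (adev->asic_type >= CHIP_NAVI10) &&
			 (adev->asic_type <= CHIP_NAVI12);

	/* Skip only while uploading a custom pptable on a Navi1x part. */
	return smu->uploading_custom_pp_table && is_navi1x;
}
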
@@ -1342,7 +1367,10 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+ if (!(adev->flags & AMD_IS_APU))
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
@@ -1361,6 +1389,8 @@ static int smu_suspend(void *handle)
if (adev->asic_type >= CHIP_NAVI10 &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1373,7 +1403,11 @@ static int smu_resume(void *handle)
pr_info("SMU is resuming...\n");
- mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
ret = smu_smc_table_hw_init(smu, false);
if (ret)
@@ -1383,13 +1417,16 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
pr_info("SMU is resumed successfully!\n");
return 0;
+
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1407,8 +1444,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex);
- smu_set_deep_sleep_dcefclk(smu,
- display_config->min_dcef_deep_sleep_set_clk / 100);
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -1527,7 +1565,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1585,9 +1624,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1651,7 +1690,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
}
return ret;
@@ -1659,18 +1698,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id)
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_set_cpu_power_state(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1681,6 +1724,10 @@ int smu_handle_task(struct smu_context *smu,
break;
}
+out:
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -1713,7 +1760,7 @@ int smu_switch_power_profile(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
mutex_unlock(&smu->mutex);
@@ -1725,7 +1772,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1740,15 +1787,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
ret = smu_enable_umd_pstate(smu, &level);
- if (ret)
+ if (ret) {
+ mutex_unlock(&smu->mutex);
return ret;
+ }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+
+ mutex_unlock(&smu->mutex);
return ret;
}
@@ -1764,6 +1818,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0) {
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ true);
+
+ return ret;
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ void *table = watermarks->cpu_addr;
+
+ mutex_lock(&smu->mutex);
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
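
Several helpers in this patch (smu_get_dpm_freq_range, smu_handle_task, smu_force_clk_levels, smu_get_power_limit, smu_set_power_profile_mode) gain a lock_needed flag: external entry points take smu->mutex themselves and pass false to the helpers they call, while callers that do not already hold the mutex pass true, avoiding recursive locking. A generic sketch of the convention, not code from the patch:

static int example_locked_helper(struct smu_context *smu, bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	/* ... work that may also be reached from an already-locked path ... */

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
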
@@ -1800,3 +1992,559 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+int smu_load_microcode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->load_microcode)
+ ret = smu->ppt_funcs->load_microcode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_check_fw_status(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->check_fw_status)
+ ret = smu->ppt_funcs->check_fw_status(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_gfx_cgpg)
+ ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_rpm)
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_limit)
+ ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_limit)
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->print_clk_levels)
+ ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_od_percentage)
+ ret = smu->ppt_funcs->get_od_percentage(smu, type);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_od_percentage)
+ ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->od_edit_dpm_table)
+ ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->read_sensor)
+ ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_profile_mode)
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_get_fan_control_mode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_control_mode)
+ ret = smu->ppt_funcs->get_fan_control_mode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_control_mode)
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_percent)
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_active_display_count)
+ ret = smu->ppt_funcs->set_active_display_count(smu, count);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type)
+ ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_high_clocks)
+ ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+ ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_clock_voltage_request)
+ ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_disable_memory_clock_switch)
+ ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->notify_smu_enable_pwe)
+ ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_xgmi_pstate)
+ ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_azalia_d3_pme(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_azalia_d3_pme)
+ ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+bool smu_baco_is_support(struct smu_context *smu)
+{
+ bool ret = false;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_is_support)
+ ret = smu->ppt_funcs->baco_is_support(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+{
+ if (!smu->ppt_funcs->baco_get_state)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+ *state = smu->ppt_funcs->baco_get_state(smu);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_baco_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_reset)
+ ret = smu->ppt_funcs->baco_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->mode2_reset)
+ ret = smu->ppt_funcs->mode2_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_uclk_dpm_states)
+ ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_current_power_state)
+ pm_state = smu->ppt_funcs->get_current_power_state(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return pm_state;
+}
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_dpm_clock_table)
+ ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
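
Most of the exported wrappers added above follow the same shape: take smu->mutex, call the optional ppt_funcs hook if present, drop the lock. As a usage illustration only (the caller below is hypothetical), the BACO wrappers defined in this file might be used like so:

static int example_baco_reset_if_supported(struct smu_context *smu)
{
	enum smu_baco_state state;

	if (!smu_baco_is_support(smu))
		return -EOPNOTSUPP;

	/* Optional: query the current BACO state first. */
	if (smu_baco_get_state(smu, &state))
		return -EINVAL;

	return smu_baco_reset(smu);
}
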
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index f1f072012fac..58c7c4a3053e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -36,6 +37,12 @@
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
#include "nbio/nbio_7_4_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
- MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
- MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
@@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(I2C_COMMANDS),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int arcturus_run_btc_afll(struct smu_context *smu)
+static int arcturus_run_btc(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
return ret;
}
+ /*
+ * For the DPM disabled case, there will be only one clock level,
+ * and it is safe to assume that it is always the current clock.
+ */
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_MCLK:
@@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_SOCCLK:
@@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_FCLK:
@@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < single_dpm_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
default:
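
The marking pattern repeated in the clock branches above reduces to a single rule; a sketch only (helper name illustrative, arcturus_freqs_in_same_level() is the existing comparison helper used by this file):

static const char *example_current_level_mark(uint32_t num_levels,
					      uint32_t level_mhz,
					      uint32_t now_mhz)
{
	/* With DPM disabled only one level is reported; treat it as current. */
	if (num_levels == 1)
		return "*";

	return arcturus_freqs_in_same_level(level_mhz, now_mhz) ? "*" : "";
}
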
@@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level;
int ret = 0;
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
- single_dpm_table = &(dpm_table->mem_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_SOCCLK:
- single_dpm_table = &(dpm_table->soc_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_FCLK:
- single_dpm_table = &(dpm_table->fclk_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
+ /*
+ * Should not arrive here since Arcturus does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
break;
default:
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1018,6 +970,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1042,8 +995,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1184,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_level;
int ret = 0;
@@ -1197,40 +1152,27 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_level].value;
- /* uclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
-
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->mem_table.dpm_levels[soft_level].value;
-
- /* socclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
-
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload boot level to %s!\n",
highest ? "highest" : "lowest");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload dpm max level to %s!\n!",
highest ? "highest" : "lowest");
return ret;
}
+ if (hive)
+ /*
+ * Force XGMI Pstate to highest or lowest
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0);
+
return ret;
}
@@ -1238,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -1249,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_max_level].value;
- /* uclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->gfx_table.dpm_levels[soft_min_level].value;
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->gfx_table.dpm_levels[soft_max_level].value;
-
- /* socclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_levels[soft_min_level].value;
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Bootup Levels!");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Max Levels!");
return ret;
}
+ if (hive)
+ /*
+ * Reset XGMI Pstate back to default
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, 0);
+
return ret;
}
@@ -1327,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1358,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -1889,6 +1816,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+
+static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* First 2 bytes are always write for lower 2b EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+
+ /* Add RESTART for read after address filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+		/* Add STOP at the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+		/* Fill with data regardless of read or write to simplify code */
+ cmd->RegisterAddr = data[i];
+ }
+}
+
+static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+		pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data:",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+	} else {
+		pr_err("arcturus_i2c_eeprom_read_data - error occurred: %x", ret);
+	}
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+		pr_debug("arcturus_i2c_eeprom_write_data, address = %x, bytes = %d, data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to EEPROM spec there is a MAX of 10 ms required for
+ * EEPROM to flush internal RX buffer after STOP was issued at the
+ * end of write transaction. During this time the EEPROM will not be
+ * responsive to any more commands - so wait a bit more.
+ */
+ msleep(10);
+
+	} else {
+		pr_err("arcturus_i2c_eeprom_write_data - error occurred: %x", ret);
+	}
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+		 * once, so the data needs to be split into chunks and each chunk
+		 * sent separately
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
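+		/*
+		 * Two bytes of each chunk carry the 16-bit EEPROM offset,
+		 * leaving MAX_SW_I2C_COMMANDS - 2 payload bytes per transfer.
+		 */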
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+			/* Insert the EEPROM destination address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+			} else {
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
+
+static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
+ .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
+ .functionality = arcturus_i2c_eeprom_i2c_func,
+};
+
+static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
+
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -1907,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
- .run_afll_btc = arcturus_run_btc_afll,
+ .run_btc = arcturus_run_btc,
/* dpm/clk tables */
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
@@ -1927,12 +2108,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
+ .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
+ .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
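+	/* common smu v11 helpers wired directly into the per-ASIC table */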
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &arcturus_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK + BACO_CNTL__PWRGOOD_MEM_MASK + BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+		/* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
+ for (i = 0; i < array_size; i++) {
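+		/*
+		 * Only write, read-modify-write and wait entries carry a
+		 * register offset; delay entries reuse the last one latched.
+		 */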
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK + BACO_CNTL__PWRGOOD_MEM_MASK + BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+		/* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..d2909c91d65b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
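+	/*
+	 * Families handled below that do not support GFXOFF also clear the
+	 * device-level PP_GFXOFF_MASK, mirroring the hwmgr feature mask.
+	 */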
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK + BACO_CNTL__PWRGOOD_MEM_MASK + BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+		/* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
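+	/* report BACO capability only on boards strapped as PX capable */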
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 34f95e0e3ea4..f73dff68e799 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
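
The hunk above only replaces the open-coded three-assignment exchange with the kernel's swap() helper; the sort itself is unchanged. Below is a standalone sketch of the same pattern, with a local swap() macro standing in for the kernel one; nothing beyond what the hunk shows is assumed about the driver.

#include <stdio.h>

#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct vrec { unsigned short us_vdd; };

/* ascending sort by us_vdd, mirroring the loop in smu7_sort_lookup_table() */
static void sort_by_vdd(struct vrec *e, unsigned int count)
{
	unsigned int i, j;

	for (i = 0; i < count - 1; i++)
		for (j = i + 1; j > 0; j--)
			if (e[j].us_vdd < e[j - 1].us_vdd)
				swap(e[j - 1], e[j]);
}

int main(void)
{
	struct vrec v[] = { { 900 }, { 850 }, { 1000 }, { 800 } };
	unsigned int i;

	sort_by_vdd(v, 4);
	for (i = 0; i < 4; i++)
		printf("%u\n", v[i].us_vdd);	/* prints 800 850 900 1000 */
	return 0;
}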
@@ -3478,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -3970,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+	 * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag,
+	 * which effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -5145,6 +5164,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+		/* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
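
The enter/exit sequences above are entirely table driven: each baco_cmd_entry row packs an opcode plus register, mask, shift, timeout and value, and baco_program_registers() (common_baco.c, not shown in this patch view) walks the rows in order. The loop below is a hedged sketch of that dispatch only; the field names and the CMD_DELAY_MS/CMD_WAITFOR handling are assumptions, not the driver's code.

/* illustrative interpreter, NOT the actual common_baco.c implementation */
static bool sketch_program_registers(struct pp_hwmgr *hwmgr,
				     const struct baco_cmd_entry *entry, u32 count)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	u32 i, reg;

	for (i = 0; i < count; i++) {
		switch (entry[i].cmd) {
		case CMD_WRITE:
			WREG32(entry[i].reg_offset, entry[i].val);
			break;
		case CMD_READMODIFYWRITE:
			reg = RREG32(entry[i].reg_offset);
			reg &= ~entry[i].mask;
			reg |= (entry[i].val << entry[i].shift) & entry[i].mask;
			WREG32(entry[i].reg_offset, reg);
			break;
		case CMD_DELAY_MS:
			msleep(entry[i].timeout);	/* assumed: delay kept in .timeout */
			break;
		/* CMD_WAITFOR would poll (reg & mask) against .val until .timeout expires */
		default:
			return false;
		}
	}
	return true;	/* callers above treat a non-zero return as success */
}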
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d08493b67b67..d71a492c87a3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+	 * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag,
+	 * which effectively disables the AVFS feature.
+	 */
+	if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
/*
@@ -5098,9 +5103,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
- for (i = 0; i < podn_vdd_dep->count - 1; i++)
- od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
- if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+ for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
@@ -5267,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t feature_mask = 0;
+
+ if (disable) {
+ feature_mask |= data->smu_features[GNLD_ULV].enabled ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ } else {
+ feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ }
+
+ if (feature_mask)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ !disable, feature_mask),
+ "enable/disable power features for compute performance Failed!",
+ return -EINVAL);
+
+ if (disable) {
+ data->smu_features[GNLD_ULV].enabled = false;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = false;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = false;
+ data->smu_features[GNLD_DS_LCLK].enabled = false;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
+ } else {
+ data->smu_features[GNLD_ULV].enabled = true;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = true;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = true;
+ data->smu_features[GNLD_DS_LCLK].enabled = true;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
+ }
+
+ return 0;
+
+}
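
The ternary chains above walk the same five ULV/deep-sleep features twice, once per direction. A hedged, more compact equivalent of the mask-building step; the compute_perf_features[] table and helper below are illustrative, not part of the driver.

static const int compute_perf_features[] = {
	GNLD_ULV, GNLD_DS_GFXCLK, GNLD_DS_SOCCLK, GNLD_DS_LCLK, GNLD_DS_DCEFCLK,
};

static uint32_t sketch_compute_feature_mask(struct vega10_hwmgr *data, bool disable)
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(compute_perf_features); i++) {
		int f = compute_perf_features[i];

		/* pick features whose current state differs from the requested one */
		if (data->smu_features[f].enabled == disable)
			mask |= data->smu_features[f].smu_feature_bitmap;
	}
	return mask;
}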
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5334,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
.set_mp1_state = vega10_set_mp1_state,
+ .disable_power_features_for_compute_performance =
+ vega10_disable_power_features_for_compute_performance,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+			if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+			if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f5915308e643..5bcf0d684151 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
return res;
}
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+	/* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
+	if (hwmgr->smu_version < 0x283200) {
+		pr_err("DF Cstate control is supported by SMC firmware 40.50 and later!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
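
The 0x283200 cut-off reads as a packed firmware version with one byte per component: 0x28 = 40, 0x32 = 50, matching the "40.50" in the message. A small decode sketch under that assumption (the byte layout is inferred, not stated in the patch).

/* assumed major.minor.debug byte packing of smu_version */
static void sketch_decode_smu_version(uint32_t ver,
				      uint32_t *major, uint32_t *minor, uint32_t *debug)
{
	*major = (ver >> 16) & 0xff;	/* 0x283200 -> 0x28 = 40 */
	*minor = (ver >> 8) & 0xff;	/* 0x283200 -> 0x32 = 50 */
	*debug = ver & 0xff;		/* 0x283200 -> 0x00 = 0  */
}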
+
+static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
+ uint32_t pstate)
+{
+ int ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ if (ret)
+ pr_err("SetXgmiPstate failed!\n");
+
+ return ret;
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
+ .set_xgmi_pstate = vega20_set_xgmi_pstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 6109815a0401..031e0c22fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,10 +259,8 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
};
@@ -322,6 +320,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -341,9 +346,9 @@ struct smu_context
struct amdgpu_device *adev;
struct amdgpu_irq_src *irq_source;
- const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
+ struct mutex sensor_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -381,11 +386,15 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
+struct i2c_adapter;
+
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*store_powerplay_table)(struct smu_context *smu);
@@ -397,7 +406,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -458,17 +467,19 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
void (*dump_pptable)(struct smu_context *smu);
int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
- int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
-};
-
-struct smu_funcs
-{
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*i2c_eeprom_init)(struct i2c_adapter *control);
+ void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
int (*init_smc_tables)(struct smu_context *smu);
int (*fini_smc_tables)(struct smu_context *smu);
int (*init_power)(struct smu_context *smu);
int (*fini_power)(struct smu_context *smu);
- int (*load_microcode)(struct smu_context *smu);
int (*check_fw_status)(struct smu_context *smu);
int (*setup_pptable)(struct smu_context *smu);
int (*get_vbios_bootup_values)(struct smu_context *smu);
@@ -484,7 +495,6 @@ struct smu_funcs
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
@@ -498,8 +508,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
- int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
@@ -521,8 +530,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -537,234 +544,90 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};
-#define smu_init_microcode(smu) \
- ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
-#define smu_init_smc_tables(smu) \
- ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
-#define smu_fini_smc_tables(smu) \
- ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
-#define smu_init_power(smu) \
- ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
-#define smu_fini_power(smu) \
- ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
-#define smu_load_microcode(smu) \
- ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
-#define smu_check_fw_status(smu) \
- ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
-#define smu_setup_pptable(smu) \
- ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
-#define smu_powergate_sdma(smu, gate) \
- ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
-#define smu_powergate_vcn(smu, gate) \
- ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
-#define smu_set_gfx_cgpg(smu, enabled) \
- ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
-#define smu_get_vbios_bootup_values(smu) \
- ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
-#define smu_get_clk_info_from_vbios(smu) \
- ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
-#define smu_check_pptable(smu) \
- ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
-#define smu_parse_pptable(smu) \
- ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_tables(smu) \
- ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0)
-#define smu_check_fw_version(smu) \
- ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
-#define smu_write_pptable(smu) \
- ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
- ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
-#define smu_set_tool_table_location(smu) \
- ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
-#define smu_notify_memory_pool_location(smu) \
- ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
-#define smu_gfx_off_control(smu, enable) \
- ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
-#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
- ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
-#define smu_system_features_control(smu, en) \
- ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
-#define smu_init_max_sustainable_clocks(smu) \
- ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
-#define smu_set_default_od_settings(smu, initialize) \
- ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_set_fan_speed_rpm(smu, speed) \
- ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
- ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
-#define smu_init_display_count(smu, count) \
- ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0)
-#define smu_feature_set_allowed_mask(smu) \
- ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
-#define smu_feature_get_enabled_mask(smu, mask, num) \
- ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
-#define smu_is_dpm_running(smu) \
- ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_notify_display_change(smu) \
- ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
- ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
- ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
- ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
- ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
-#define smu_populate_umd_state_clk(smu) \
- ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
-#define smu_set_default_od8_settings(smu) \
- ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
-#define smu_get_power_limit(smu, limit, def) \
- ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
-#define smu_set_power_limit(smu, limit) \
- ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
-#define smu_get_current_clk_freq(smu, clk_id, value) \
- ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
-#define smu_print_clk_levels(smu, clk_type, buf) \
- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
-#define smu_get_od_percentage(smu, type) \
- ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
-#define smu_set_od_percentage(smu, type, value) \
- ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
-#define smu_od_edit_dpm_table(smu, type, input, size) \
- ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
-#define smu_tables_init(smu, tab) \
- ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
-#define smu_set_thermal_fan_table(smu) \
- ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
-#define smu_start_thermal_control(smu) \
- ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
-#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_smc_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
-#define smu_get_power_profile_mode(smu, buf) \
- ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
-#define smu_set_power_profile_mode(smu, param, param_size) \
- ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
-#define smu_pre_display_config_changed(smu) \
- ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
-#define smu_display_config_changed(smu) \
- ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
-#define smu_apply_clocks_adjust_rules(smu) \
- ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
-#define smu_force_dpm_limit_value(smu, highest) \
- ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
-#define smu_unforce_dpm_levels(smu) \
- ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
-#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
- ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
-#define smu_set_cpu_power_state(smu) \
- ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
-#define smu_get_fan_control_mode(smu) \
- ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
-#define smu_set_fan_control_mode(smu, value) \
- ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
-#define smu_get_fan_speed_percent(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
-#define smu_set_fan_speed_percent(smu, speed) \
- ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
-#define smu_get_fan_speed_rpm(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
-
-#define smu_msg_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_clk_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_feature_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_table_get_index(smu, tab) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
-#define smu_power_get_index(smu, src) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
-#define smu_workload_get_type(smu, profile) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
-#define smu_set_deep_sleep_dcefclk(smu, clk) \
- ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
-#define smu_set_active_display_count(smu, count) \
- ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
-#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
- ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
-#define smu_get_clock_by_type(smu, type, clocks) \
- ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
-#define smu_get_max_high_clocks(smu, clocks) \
- ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
-#define smu_display_clock_voltage_request(smu, clock_req) \
- ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
-#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
- ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
-#define smu_get_dal_power_level(smu, clocks) \
- ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
-#define smu_get_perf_level(smu, designation, level) \
- ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
-#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
- ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
-#define smu_notify_smu_enable_pwe(smu) \
- ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
-#define smu_dpm_set_uvd_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
-#define smu_dpm_set_vce_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
-#define smu_set_xgmi_pstate(smu, pstate) \
- ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) \
- ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
-#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
- ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
-#define smu_thermal_temperature_range_update(smu, range, rw) \
- ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
-#define smu_get_thermal_temperature_range(smu, range) \
- ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
-#define smu_register_irq_handler(smu) \
- ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
-#define smu_set_azalia_d3_pme(smu) \
- ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
- ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
- ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_baco_is_support(smu) \
- ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
-#define smu_baco_get_state(smu, state) \
- ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
-#define smu_baco_reset(smu) \
- ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
-#define smu_asic_set_performance_level(smu, level) \
- ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
-#define smu_dump_pptable(smu) \
- ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
-#define smu_get_dpm_uclk_limited(smu, clock, max) \
- ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
+int smu_load_microcode(struct smu_context *smu);
+
+int smu_check_fw_status(struct smu_context *smu);
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed);
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed);
+int smu_get_fan_control_mode(struct smu_context *smu);
+int smu_set_fan_control_mode(struct smu_context *smu, int value);
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks);
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+int smu_notify_smu_enable_pwe(struct smu_context *smu);
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_set_azalia_d3_pme(struct smu_context *smu);
+
+bool smu_baco_is_support(struct smu_context *smu);
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
+
+int smu_baco_reset(struct smu_context *smu);
+int smu_mode2_reset(struct smu_context *smu);
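
These prototypes replace the removed smu_funcs macro wrappers with exported helpers, several of which now take a lock_needed flag. A hedged sketch of the shape such a wrapper presumably takes in amdgpu_smu.c: take the smu mutex, dispatch through ppt_funcs, drop the lock. The body is illustrative, not the actual implementation.

int smu_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_reset)
		ret = smu->ppt_funcs->baco_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}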
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -798,6 +661,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -808,7 +675,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id);
+ enum amd_pp_task task_id,
+ bool lock_needed);
int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
@@ -818,7 +686,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max);
+ uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -827,10 +695,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table);
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index 78e5927b7711..e3291259b249 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -95,8 +95,7 @@
//BTC
#define PPSMC_MSG_RunAfllBtc 0x30
-#define PPSMC_MSG_RunGfxDcBtc 0x31
-#define PPSMC_MSG_RunSocDcBtc 0x32
+#define PPSMC_MSG_RunDcBtc 0x31
//Debug
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..af977675fd33 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -355,6 +355,10 @@ struct pp_hwmgr_func {
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
+ int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
+ bool disable);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index e02950b505fa..a886f0644d24 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -137,29 +137,29 @@
#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
-#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
-#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
-#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
-#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT )
-#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK )
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
//FIXME need updating
// Debug Overrides Bitmask
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
-#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002
+#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002
// I2C Config Bit Defines
#define I2C_CONTROLLER_ENABLED 1
@@ -423,18 +423,30 @@ typedef enum {
} PwrConfig_e;
typedef enum {
- XGMI_LINK_RATE_12 = 0, // 12Gbps
- XGMI_LINK_RATE_16, // 16Gbps
- XGMI_LINK_RATE_22, // 22Gbps
- XGMI_LINK_RATE_25, // 25Gbps
+ XGMI_LINK_RATE_2 = 2, // 2Gbps
+ XGMI_LINK_RATE_4 = 4, // 4Gbps
+ XGMI_LINK_RATE_8 = 8, // 8Gbps
+ XGMI_LINK_RATE_12 = 12, // 12Gbps
+ XGMI_LINK_RATE_16 = 16, // 16Gbps
+ XGMI_LINK_RATE_17 = 17, // 17Gbps
+ XGMI_LINK_RATE_18 = 18, // 18Gbps
+ XGMI_LINK_RATE_19 = 19, // 19Gbps
+ XGMI_LINK_RATE_20 = 20, // 20Gbps
+ XGMI_LINK_RATE_21 = 21, // 21Gbps
+ XGMI_LINK_RATE_22 = 22, // 22Gbps
+ XGMI_LINK_RATE_23 = 23, // 23Gbps
+ XGMI_LINK_RATE_24 = 24, // 24Gbps
+ XGMI_LINK_RATE_25 = 25, // 25Gbps
XGMI_LINK_RATE_COUNT
} XGMI_LINK_RATE_e;
typedef enum {
- XGMI_LINK_WIDTH_2 = 0, // x2
- XGMI_LINK_WIDTH_4, // x4
- XGMI_LINK_WIDTH_8, // x8
- XGMI_LINK_WIDTH_16, // x16
+ XGMI_LINK_WIDTH_1 = 1, // x1
+ XGMI_LINK_WIDTH_2 = 2, // x2
+ XGMI_LINK_WIDTH_4 = 4, // x4
+ XGMI_LINK_WIDTH_8 = 8, // x8
+ XGMI_LINK_WIDTH_9 = 9, // x9
+ XGMI_LINK_WIDTH_16 = 16, // x16
XGMI_LINK_WIDTH_COUNT
} XGMI_LINK_WIDTH_e;
@@ -696,7 +708,11 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t GpioPadding;
- uint32_t BoardReserved[9];
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -802,7 +818,7 @@ typedef struct {
uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
- uint32_t EnabledAvfsModules[2];
+ uint32_t EnabledAvfsModules[3];
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
@@ -865,7 +881,8 @@ typedef struct {
//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
-#define TABLE_COUNT 9
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_COUNT 10
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index b0dd05d431dd..d8c9b7f91fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -114,6 +114,7 @@
__SMU_DUMMY_MAP(PowerDownJpeg), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
__SMU_DUMMY_MAP(RunGfxDcBtc), \
__SMU_DUMMY_MAP(RunSocDcBtc), \
__SMU_DUMMY_MAP(SetMemoryChannelEnable), \
@@ -168,6 +169,7 @@
__SMU_DUMMY_MAP(PowerGateAtHub), \
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -251,6 +253,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
__SMU_DUMMY_MAP(MMHUB_PG), \
__SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
__SMU_DUMMY_MAP(WAFL_CG),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5bda8539447a..606149085683 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -48,6 +48,8 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
-void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+int smu_v11_0_init_microcode(struct smu_context *smu);
+
+int smu_v11_0_load_microcode(struct smu_context *smu);
+
+int smu_v11_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_init_power(struct smu_context *smu);
+
+int smu_v11_0_fini_power(struct smu_context *smu);
+
+int smu_v11_0_check_fw_status(struct smu_context *smu);
+
+int smu_v11_0_setup_pptable(struct smu_context *smu);
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
+
+int smu_v11_0_check_pptable(struct smu_context *smu);
+
+int smu_v11_0_parse_pptable(struct smu_context *smu);
+
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
+
+int smu_v11_0_check_fw_version(struct smu_context *smu);
+
+int smu_v11_0_write_pptable(struct smu_context *smu);
+
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+
+int smu_v11_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_v11_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v11_0_notify_display_change(struct smu_context *smu);
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
+
+int smu_v11_0_start_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_stop_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
+
+int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req);
+
+uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu);
+
+int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode);
+
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v11_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
+
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+bool smu_v11_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
+
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v11_0_baco_reset(struct smu_context *smu);
+
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..b2f96a101124 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index acf3db12f59f..9b9f5df0911c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping {
int map_to;
};
-void smu_v12_0_set_smu_funcs(struct smu_context *smu);
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg);
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v12_0_wait_for_response(struct smu_context *smu);
+
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v12_0_check_fw_status(struct smu_context *smu);
+
+int smu_v12_0_check_fw_version(struct smu_context *smu);
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v12_0_mode2_reset(struct smu_context *smu);
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 12c0e469bf35..aaec884d63ed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -35,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
@@ -205,7 +208,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
@@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
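+	/* the remaining DPM features are only enabled when the corresponding pp_feature bits are set */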
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
@@ -547,7 +562,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
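+	/* metrics are cached; refresh them from the SMU at most once every 100ms */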
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
@@ -585,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -615,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -677,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+ return od_table->cap[feature];
+}
+
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
switch (clk_type) {
case SMU_GFXCLK:
@@ -734,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
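+		/* read the current link speed and width so the active level can be marked with '*' below */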
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
default:
break;
}
@@ -759,6 +859,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+		/* There are only 2 levels for fine-grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
@@ -783,13 +889,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
- ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret)
return ret;
smu->pstate_sclk = min_sclk_freq * 100;
- ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret)
return ret;
@@ -842,7 +948,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret)
return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
@@ -892,7 +998,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -919,7 +1025,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -1254,7 +1360,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu_display_clock_voltage_request(smu, &clock_req)) {
+
+ ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
+ if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -1386,6 +1494,7 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1407,8 +1516,9 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1449,18 +1559,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
- break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
+ default:
+ return -EINVAL;
}
ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1483,10 +1622,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->asic_type != CHIP_NAVI10)
- return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1539,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1572,23 +1712,291 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
return 0;
}
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
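+		/* argument layout: [31:16] link level index, [15:8] capped gen speed, [7:0] capped lane count */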
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table)
+{
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu)
+{
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize)
+{
+ OverDriveTable_t *od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+
+ }
+
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table) {
+ navi10_dump_od_table(od_table);
+ }
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
+{
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
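+		/* user input arrives as (index, value) pairs */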
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+						od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+						od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ // no lock needed because smu_od_edit_dpm_table has it
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret) {
+ return ret;
+ }
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ // Allow setting zero to disable the OverDrive VDDC curve
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ // If setting 0, disable all voltage curve settings
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return ret;
+}
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1627,12 +2035,63 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
.get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,17 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..04daf7e9fe05 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
@@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
* This interface just for getting uclk ultimate freq and should't introduce
* other likewise function result in overmuch callback.
*/
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- DpmClocks_t *table = smu->smu_table.clocks_table;
-
- if (!clock || !table)
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
- if (max)
- *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
- else
- *clock = table->FClocks[0].Freq;
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
return 0;
-
}
static int renoir_print_clk_levels(struct smu_context *smu,
@@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics = {0};
+ SmuMetrics_t metrics;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
+ memset(&metrics, 0, sizeof(metrics));
+
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
@@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK:
/* retirve table returned paramters unit is MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
- ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
if (!ret) {
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
if (cur_value == max)
@@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return size;
}
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+		if (sclk_mask)
+			/* sclk is reported as gfxclk and has three levels: min, current and max */
+			*sclk_mask = 3 - 1;
+
+		if (mclk_mask)
+			*mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+		if (soc_mask)
+			*soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/**
+ * This interface gets the DPM clock table for DC
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+	for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+
+	int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
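+	/* the lowest and highest set bits in the mask select the soft min and max DPM levels */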
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+			pr_info("Currently sclk only supports 3 levels on APU\n");
+ return -EINVAL;
+ }
+
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
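+		/* levels map to: 0 = min gfxclk, 1 = UMD pstate gfxclk, 2 = max gfxclk */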
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+		pr_err("Invalid power profile mode %d\n", profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+	workload_type = smu_workload_get_type(smu, profile_mode);
+	if (workload_type < 0) {
+		pr_err("Unsupported power profile mode %d on RENOIR\n", profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* save watermark settings into pplib smu structure,
+ * also pass data to smu controller
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ ret = smu_write_watermarks_table(smu);
+
+ return ret;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+		 * Not all profile modes are supported on Renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
- .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
.print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .send_smc_msg = smu_v12_0_send_msg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &renoir_ppt_funcs;
smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
+ smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
new file mode 100644
index 000000000000..8bcda7871309
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
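+/*
+ * Convenience wrappers around the ASIC-specific callbacks in smu->ppt_funcs.
+ * A callback that is not implemented is treated as a no-op (returns 0),
+ * except where the caller needs a real result, in which case -EINVAL is
+ * returned instead.
+ */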
+#define smu_init_microcode(smu) \
+ ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
+#define smu_setup_pptable(smu) \
+ ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+ ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+ ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+ ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+ ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+ ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+ ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+ ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..fc9679ea2368 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,17 +24,19 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "vega20_ppt.h"
-#include "arcturus_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -61,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -88,7 +90,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -113,7 +115,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -144,7 +146,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
@@ -206,7 +208,7 @@ out:
return err;
}
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
@@ -244,7 +246,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -259,7 +261,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -354,7 +356,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
return 0;
}
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
@@ -369,6 +371,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -385,6 +388,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
+ pr_info("use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
@@ -433,13 +437,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -460,18 +464,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
smu_table->metrics_time = 0;
@@ -481,7 +484,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -499,7 +502,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -576,7 +579,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
@@ -673,7 +676,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
@@ -719,7 +722,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
@@ -727,7 +730,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_parse_pptable(struct smu_context *smu)
+int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
@@ -751,7 +754,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
int ret;
@@ -760,7 +763,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_pptable(struct smu_context *smu)
+int smu_v11_0_write_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
@@ -771,24 +774,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
-static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -800,7 +786,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl
return ret;
}
-static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -809,11 +795,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context)
return -EINVAL;
- return smu_set_deep_sleep_dcefclk(smu,
- table_context->boot_values.dcefclk / 100);
+ return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
-static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
@@ -831,7 +816,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
@@ -843,7 +828,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
}
-static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -870,7 +855,7 @@ failed:
return ret;
}
-static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
@@ -899,7 +884,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_system_features_control(struct smu_context *smu,
+int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -925,7 +910,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_notify_display_change(struct smu_context *smu)
+int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
@@ -983,7 +968,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return ret;
}
-static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
@@ -1063,13 +1048,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
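+/*
+ * Maximum allowed power limit: the pptable limit (or the first limit read
+ * at boot as a fallback), scaled up by the overdrive power-percentage
+ * headroom when OD is enabled.
+ */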
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
+{
+	uint32_t od_limit, max_power_limit;
+	struct smu_11_0_powerplay_table *powerplay_table = NULL;
+	struct smu_table_context *table_context = &smu->smu_table;
+
+	powerplay_table = table_context->power_play_table;
+
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+		/* if we couldn't get the table limit, fall back to the first-read value */
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
+ }
+
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
+
+ return max_power_limit;
+}
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
+ uint32_t max_power_limit;
- if (n > smu->default_power_limit) {
- pr_err("New power limit is over the max allowed %d\n",
- smu->default_power_limit);
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
return -EINVAL;
}
@@ -1091,7 +1107,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
-static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_id,
uint32_t *value)
{
@@ -1170,7 +1186,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
struct smu_temperature_range range;
@@ -1212,6 +1228,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
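+/* Counterpart of smu_v11_0_start_thermal_control(): mask off the THM thermal interrupts. */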
+int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@ -1236,7 +1261,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-static int smu_v11_0_read_sensor(struct smu_context *smu,
+int smu_v11_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
@@ -1273,7 +1298,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
return ret;
}
-static int
+int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
@@ -1316,9 +1341,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
- mutex_unlock(&smu->mutex);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1328,27 +1351,7 @@ failed:
return ret;
}
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
-
- return ret;
-}
-
-static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
@@ -1361,12 +1364,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
- mutex_lock(&smu->mutex);
if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
- mutex_unlock(&smu->mutex);
break;
default:
break;
@@ -1375,7 +1376,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static uint32_t
+uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1415,7 +1416,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
-static int
+int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1444,7 +1445,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
-static int
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1472,7 +1473,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1482,10 +1483,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- mutex_lock(&(smu->mutex));
ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
- goto set_fan_speed_rpm_failed;
+ return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -1496,23 +1496,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-set_fan_speed_rpm_failed:
- mutex_unlock(&(smu->mutex));
return ret;
}
-#define XGMI_STATE_D0 1
-#define XGMI_STATE_D3 0
-
-static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
- mutex_unlock(&(smu->mutex));
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
return ret;
}
@@ -1559,7 +1552,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
.process = smu_v11_0_irq_process,
};
-static int smu_v11_0_register_irq_handler(struct smu_context *smu)
+int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = smu->irq_source;
@@ -1591,7 +1584,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -1621,13 +1614,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1637,7 +1628,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
-static bool smu_v11_0_baco_is_support(struct smu_context *smu)
+bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1661,7 +1652,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
}
-static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
enum smu_baco_state baco_state;
@@ -1673,7 +1664,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
return baco_state;
}
-static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1697,7 +1688,7 @@ out:
return ret;
}
-static int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_reset(struct smu_context *smu)
{
int ret = 0;
@@ -1718,13 +1709,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
- mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
@@ -1751,80 +1741,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v11_0_funcs = {
- .init_microcode = smu_v11_0_init_microcode,
- .load_microcode = smu_v11_0_load_microcode,
- .check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
- .send_smc_msg = smu_v11_0_send_msg,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
- .setup_pptable = smu_v11_0_setup_pptable,
- .init_smc_tables = smu_v11_0_init_smc_tables,
- .fini_smc_tables = smu_v11_0_fini_smc_tables,
- .init_power = smu_v11_0_init_power,
- .fini_power = smu_v11_0_fini_power,
- .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
- .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .check_pptable = smu_v11_0_check_pptable,
- .parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_tables = smu_v11_0_populate_smc_pptable,
- .write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
- .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
- .set_tool_table_location = smu_v11_0_set_tool_table_location,
- .init_display_count = smu_v11_0_init_display_count,
- .set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
- .system_features_control = smu_v11_0_system_features_control,
- .notify_display_change = smu_v11_0_notify_display_change,
- .set_power_limit = smu_v11_0_set_power_limit,
- .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
- .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
- .start_thermal_control = smu_v11_0_start_thermal_control,
- .read_sensor = smu_v11_0_read_sensor,
- .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
- .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
- .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
- .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
- .gfx_off_control = smu_v11_0_gfx_off_control,
- .register_irq_handler = smu_v11_0_register_irq_handler,
- .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
- .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
- .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
-};
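+/*
+ * Clamp the soft min/max frequency of @clk_type.  The message parameter
+ * carries the clock index in the upper 16 bits and the frequency in the
+ * lower 16 bits; a zero min or max leaves that bound untouched.
+ */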
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
-void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
- smu->funcs = &smu_v11_0_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- vega20_set_ppt_funcs(smu);
- break;
- case CHIP_ARCTURUS:
- arcturus_set_ppt_funcs(smu);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- navi10_set_ppt_funcs(smu);
- break;
- default:
- pr_warn("Unknown asic for smu11\n");
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+ return ret;
+}
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (initialize) {
+		if (table_context->overdrive_table)
+			return -EINVAL;
+		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+		if (!table_context->overdrive_table)
+			return -ENOMEM;
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
}
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9d2280ca1f4b..139dd737eaa5 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -24,12 +24,12 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
-#include "renoir_ppt.h"
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -41,7 +41,7 @@
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
-static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
@@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
return 0;
}
-static int smu_v12_0_wait_for_response(struct smu_context *smu)
+int smu_v12_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
@@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v12_0_check_fw_status(struct smu_context *smu)
+int smu_v12_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v12_0_check_fw_version(struct smu_context *smu)
+int smu_v12_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu)
return ret;
}
-static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}
-static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}
-static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
@@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
* Returns 2=Not in GFXOFF.
* Returns 3=Transition into GFXOFF.
*/
-static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
uint32_t reg;
uint32_t gfxOff_Status = 0;
@@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
return gfxOff_Status;
}
-static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0, timeout = 500;
if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
- /* confirm gfx is back to "off" state, timeout is 5 seconds */
- while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) {
- msleep(10);
- timeout--;
- if (timeout == 0) {
- DRM_ERROR("enable gfxoff timeout and failed!\n");
- break;
- }
- }
} else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
@@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v12_0_init_smc_tables(struct smu_context *smu)
+int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu)
return smu_tables_init(smu, tables);
}
-static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->clocks_table);
@@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
@@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
-static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0;
-
- mutex_lock(&smu->mutex);
+ uint32_t mclk_mask, soc_mask;
if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, max, true);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
if (ret)
goto failed;
break;
default:
ret = -EINVAL;
goto failed;
-
}
}
@@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, min, false);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
if (ret)
goto failed;
break;
@@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
ret = -EINVAL;
goto failed;
}
-
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v12_0_funcs = {
- .check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
- .powergate_sdma = smu_v12_0_powergate_sdma,
- .powergate_vcn = smu_v12_0_powergate_vcn,
- .send_smc_msg = smu_v12_0_send_msg,
- .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
- .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
- .gfx_off_control = smu_v12_0_gfx_off_control,
- .init_smc_tables = smu_v12_0_init_smc_tables,
- .fini_smc_tables = smu_v12_0_fini_smc_tables,
- .populate_smc_tables = smu_v12_0_populate_smc_tables,
- .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
-};
-
-void smu_v12_0_set_smu_funcs(struct smu_context *smu)
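+/* Ask the SMU firmware to perform a mode-2 GPU reset. */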
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
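+/*
+ * SMU12 exposes per-domain hard-min/soft-max messages, so pick the
+ * message pair that matches the requested clock type.
+ */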
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
- smu->funcs = &smu_v12_0_funcs;
+ if (max < min)
+ return -EINVAL;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- renoir_set_ppt_funcs(smu);
- break;
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
default:
- pr_warn("Unknown asic for smu12\n");
+ return -EINVAL;
}
+
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index dc754447f0dd..23c12018dbc1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511..aa0ee2b46135 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 4728aa23a818..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d9..0f3836fd9666 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf83..90c782c132d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea85..f604612f411f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 7c960b07746f..ae18fbcb26fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
- data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 64386ee3f878..0b4892833808 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
@@ -219,7 +221,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
@@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}
@@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;
- mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY;
@@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&(smu->mutex));
return pm_type;
}
@@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context;
- mutex_lock(&smu->mutex);
-
switch (clk_type) {
case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table);
@@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL;
}
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled;
PPCLK_e clk_id;
- mutex_lock(&(smu->mutex));
-
dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context;
@@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
}
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
set_od_failed:
- mutex_unlock(&(smu->mutex));
-
return ret;
}
@@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
- mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- mutex_unlock(&(smu->mutex));
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
}
return ret;
@@ -3023,6 +3006,7 @@ static int vega20_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3046,8 +3030,9 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -3139,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
+ if (smu_version < 0x283200) {
+ pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
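
A note on the 0x283200 threshold in vega20_set_df_cstate() above: assuming the SMU version word packs one byte each for the major, minor and debug fields (which is consistent with the "40.50" comment), 0x28 decodes to 40 and 0x32 to 50. A standalone sketch of that decoding, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t smu_version = 0x283200;
		unsigned int major = (smu_version >> 16) & 0xff;	/* 0x28 -> 40 */
		unsigned int minor = (smu_version >> 8) & 0xff;		/* 0x32 -> 50 */
		unsigned int debug = smu_version & 0xff;		/* 0x00 -> 0  */

		printf("SMC firmware %u.%u.%u\n", major, minor, debug);
		return 0;
	}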
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ }
+
+ return ret;
+}
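
The smu_pcie_arg built by vega20_update_pcie_parameters() above is bit-packed: the link level index goes in bits 31:16, the capped PCIe gen speed in bits 15:8 and the capped lane-count field in bits 7:0. A minimal standalone sketch of that packing (function name and the sample values are illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* Packs one OverridePcieParameters argument the same way the loop above
	 * does: level in bits 31:16, capped gen speed in bits 15:8, capped lane
	 * count field in bits 7:0. */
	static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen_speed,
				      uint32_t lane_count, uint32_t gen_cap,
				      uint32_t width_cap)
	{
		uint32_t gen = (gen_speed <= gen_cap) ? gen_speed : gen_cap;
		uint32_t lanes = (lane_count <= width_cap) ? lane_count : width_cap;

		return (level << 16) | (gen << 8) | lanes;
	}

	int main(void)
	{
		/* illustrative values only */
		printf("0x%08x\n", pack_pcie_arg(1, 4, 16, 3, 16));
		return 0;
	}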
+
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3151,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3181,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
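
With this change the table above carries both the vega20-specific handlers and the shared smu_v11_0_* helpers, so the common SMU layer can dispatch every operation through smu->ppt_funcs rather than a separate smu->funcs table. A standalone mock of that dispatch pattern, with invented names purely for illustration (the real wrappers live in smu_internal.h, added elsewhere in this series, and may differ in detail):

	#include <stdio.h>

	struct mock_smu;

	/* Stand-in for the pptable_funcs vtable: one optional hook per ASIC. */
	struct mock_ppt_funcs {
		int (*set_df_cstate)(struct mock_smu *smu, int state);
	};

	struct mock_smu {
		const struct mock_ppt_funcs *ppt_funcs;
	};

	static int mock_vega20_set_df_cstate(struct mock_smu *smu, int state)
	{
		(void)smu;
		printf("DF cstate request: %d\n", state);
		return 0;
	}

	static const struct mock_ppt_funcs mock_vega20_funcs = {
		.set_df_cstate = mock_vega20_set_df_cstate,
	};

	int main(void)
	{
		struct mock_smu smu = { .ppt_funcs = &mock_vega20_funcs };

		/* The common layer only calls a hook the ASIC actually filled in. */
		if (smu.ppt_funcs->set_df_cstate)
			return smu.ppt_funcs->set_df_cstate(&smu, 1);
		return 0;
	}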
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}