Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/smu_v11_0.c')
-rw-r--r--	drivers/gpu/drm/amd/powerplay/smu_v11_0.c	1354
1 file changed, 631 insertions(+), 723 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index aa76c2cea747..7b950a582a28 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,18 +26,18 @@
#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
+#define SWSMU_CODE_LAYER_L3
#include "amdgpu.h"
#include "amdgpu_smu.h"
-#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
-#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "amdgpu_ras.h"
+#include "smu_cmn.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -46,96 +46,26 @@
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
-MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
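A minimal illustration of the difference (hypothetical message; adev is the usual struct amdgpu_device pointer): on a multi-GPU system the dev_* helpers prefix the log line with the failing device, while plain pr_* output cannot be attributed to a specific GPU.

	dev_err(adev->dev, "SMU message timed out\n");
	/* -> amdgpu 0000:0b:00.0: SMU message timed out */

	pr_err("SMU message timed out\n");
	/* -> SMU message timed out   (no hint which GPU) */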
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
#define SMU11_VOLTAGE_SCALE 4
-static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
- uint16_t msg)
-{
- struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
- return 0;
-}
-
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
-{
- struct amdgpu_device *adev = smu->adev;
-
- *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
- return 0;
-}
-
-static int smu_v11_0_wait_for_response(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
-
- for (i = 0; i < timeout; i++) {
- cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
- if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
- return cur_value == 0x1 ? 0 : -EIO;
-
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == timeout)
- return -ETIME;
-
- return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-
-int
-smu_v11_0_send_msg_with_param(struct smu_context *smu,
- enum smu_message_type msg,
- uint32_t param,
- uint32_t *read_arg)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0, index = 0;
-
- index = smu_msg_get_index(smu, msg);
- if (index < 0)
- return index;
-
- mutex_lock(&smu->message_lock);
- ret = smu_v11_0_wait_for_response(smu);
- if (ret) {
- pr_err("Msg issuing pre-check failed and "
- "SMU may be not in the right state!\n");
- goto out;
- }
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
- WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-
- smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
- ret = smu_v11_0_wait_for_response(smu);
- if (ret) {
- pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
- goto out;
- }
-
- if (read_arg) {
- ret = smu_v11_0_read_arg(smu, read_arg);
- if (ret) {
- pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
- smu_get_message_name(smu, msg), index, param, ret);
- goto out;
- }
- }
-out:
- mutex_unlock(&smu->message_lock);
- return ret;
-}
+#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
int smu_v11_0_init_microcode(struct smu_context *smu)
{
@@ -148,9 +78,6 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
struct amdgpu_firmware_info *ucode = NULL;
switch (adev->asic_type) {
- case CHIP_VEGA20:
- chip_name = "vega20";
- break;
case CHIP_ARCTURUS:
chip_name = "arcturus";
break;
@@ -163,8 +90,15 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
case CHIP_NAVI12:
chip_name = "navi12";
break;
+ case CHIP_SIENNA_CICHLID:
+ chip_name = "sienna_cichlid";
+ break;
+ case CHIP_NAVY_FLOUNDER:
+ chip_name = "navy_flounder";
+ break;
default:
- BUG();
+ dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
@@ -199,6 +133,15 @@ out:
return err;
}
+void smu_v11_0_fini_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ adev->pm.fw_version = 0;
+}
+
int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -261,7 +204,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
uint8_t smu_minor, smu_debug;
int ret = 0;
- ret = smu_get_smc_version(smu, &if_version, &smu_version);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
@@ -270,9 +213,6 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
smu_debug = (smu_version >> 0) & 0xff;
switch (smu->adev->asic_type) {
- case CHIP_VEGA20:
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
- break;
case CHIP_ARCTURUS:
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
@@ -285,8 +225,14 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_NAVI14:
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
+ case CHIP_SIENNA_CICHLID:
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
+ break;
+ case CHIP_NAVY_FLOUNDER:
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
+ break;
default:
- pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -300,11 +246,11 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* of halt driver loading.
*/
if (if_version != smu->smc_driver_if_version) {
- pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
+ dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
- pr_warn("SMU driver if version not matched\n");
+ dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
}
return ret;
@@ -366,8 +312,9 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
- if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
- pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
+ if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER) {
+ dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -384,11 +331,11 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
- pr_info("use vbios provided pptable\n");
+ dev_info(adev->dev, "use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
- ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
@@ -403,82 +350,87 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_dpm_context(struct smu_context *smu)
-{
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
- if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
- return -EINVAL;
-
- return smu_alloc_dpm_context(smu);
-}
-
-static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
-{
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
- if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
- return -EINVAL;
-
- kfree(smu_dpm->dpm_context);
- kfree(smu_dpm->golden_dpm_context);
- kfree(smu_dpm->dpm_current_power_state);
- kfree(smu_dpm->dpm_request_power_state);
- smu_dpm->dpm_context = NULL;
- smu_dpm->golden_dpm_context = NULL;
- smu_dpm->dpm_context_size = 0;
- smu_dpm->dpm_current_power_state = NULL;
- smu_dpm->dpm_request_power_state = NULL;
-
- return 0;
-}
-
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *tables = NULL;
+ struct smu_table *tables = smu_table->tables;
int ret = 0;
- if (smu_table->tables)
- return -EINVAL;
-
- tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
- GFP_KERNEL);
- if (!tables)
- return -ENOMEM;
+ smu_table->driver_pptable =
+ kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ ret = -ENOMEM;
+ goto err0_out;
+ }
- smu_table->tables = tables;
+ smu_table->max_sustainable_clocks =
+ kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
+ if (!smu_table->max_sustainable_clocks) {
+ ret = -ENOMEM;
+ goto err1_out;
+ }
- ret = smu_tables_init(smu, tables);
- if (ret)
- return ret;
+ /* Arcturus does not support OVERDRIVE */
+ if (tables[SMU_TABLE_OVERDRIVE].size) {
+ smu_table->overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->overdrive_table) {
+ ret = -ENOMEM;
+ goto err2_out;
+ }
- ret = smu_v11_0_init_dpm_context(smu);
- if (ret)
- return ret;
+ smu_table->boot_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->boot_overdrive_table) {
+ ret = -ENOMEM;
+ goto err3_out;
+ }
+ }
return 0;
+
+err3_out:
+ kfree(smu_table->overdrive_table);
+err2_out:
+ kfree(smu_table->max_sustainable_clocks);
+err1_out:
+ kfree(smu_table->driver_pptable);
+err0_out:
+ return ret;
}
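The error labels unwind in reverse allocation order, so each failure path frees exactly what was successfully allocated before it. A minimal sketch of the idiom (names hypothetical):

	a = kzalloc(A_SIZE, GFP_KERNEL);
	if (!a)
		goto err0_out;		/* nothing allocated yet */

	b = kzalloc(B_SIZE, GFP_KERNEL);
	if (!b)
		goto err1_out;		/* undo the first allocation only */

	return 0;

err1_out:
	kfree(a);
err0_out:
	return -ENOMEM;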
int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- int ret = 0;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- if (!smu_table->tables)
- return -EINVAL;
+ kfree(smu_table->boot_overdrive_table);
+ kfree(smu_table->overdrive_table);
+ kfree(smu_table->max_sustainable_clocks);
+ kfree(smu_table->driver_pptable);
+ smu_table->boot_overdrive_table = NULL;
+ smu_table->overdrive_table = NULL;
+ smu_table->max_sustainable_clocks = NULL;
+ smu_table->driver_pptable = NULL;
+ kfree(smu_table->hardcode_pptable);
+ smu_table->hardcode_pptable = NULL;
- kfree(smu_table->tables);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
- smu_table->tables = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
- ret = smu_v11_0_fini_dpm_context(smu);
- if (ret)
- return ret;
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
return 0;
}
@@ -512,6 +464,32 @@ int smu_v11_0_fini_power(struct smu_context *smu)
return 0;
}
+static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
+ uint8_t clk_id,
+ uint8_t syspll_id,
+ uint32_t *clk_freq)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+ int ret, index;
+
+ input.clk_id = clk_id;
+ input.syspll_id = syspll_id;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
+
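The ATOM interface reuses the parameter buffer for its output, which is why output is simply a cast of &input. A hedged usage sketch (the boot-clock reads below in this diff do exactly this):

	uint32_t socclk;

	/* on success, socclk holds the clock in 10 kHz units */
	if (!smu_v11_0_atom_get_smu_clockinfo(adev, SMU11_SYSPLL0_SOCCLK_ID,
					      0, &socclk))
		dev_dbg(adev->dev, "boot SOCCLK: %u MHz\n", socclk / 100);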
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
int ret, index;
@@ -524,13 +502,13 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
- ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
(uint8_t **)&header);
if (ret)
return ret;
if (header->format_revision != 3) {
- pr_err("unknown atom_firmware_info version! for smu11\n");
+ dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
return -EINVAL;
}
@@ -570,102 +548,37 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.format_revision = header->format_revision;
smu->smu_table.boot_values.content_revision = header->content_revision;
- return 0;
-}
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
+ (uint8_t)0,
+ &smu->smu_table.boot_values.socclk);
-int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
-{
- int ret, index;
- struct amdgpu_device *adev = smu->adev;
- struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
- struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
-
- input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- memset(&input, 0, sizeof(input));
- input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- memset(&input, 0, sizeof(input));
- input.clk_id = SMU11_SYSPLL0_ECLK_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
+ (uint8_t)0,
+ &smu->smu_table.boot_values.dcefclk);
- memset(&input, 0, sizeof(input));
- input.clk_id = SMU11_SYSPLL0_VCLK_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
- memset(&input, 0, sizeof(input));
- input.clk_id = SMU11_SYSPLL0_DCLK_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL0_ECLK_ID,
+ (uint8_t)0,
+ &smu->smu_table.boot_values.eclk);
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL0_VCLK_ID,
+ (uint8_t)0,
+ &smu->smu_table.boot_values.vclk);
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL0_DCLK_ID,
+ (uint8_t)0,
+ &smu->smu_table.boot_values.dclk);
if ((smu->smu_table.boot_values.format_revision == 3) &&
- (smu->smu_table.boot_values.content_revision >= 2)) {
- memset(&input, 0, sizeof(input));
- input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
- input.syspll_id = SMU11_SYSPLL1_2_ID;
- input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
- index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- getsmuclockinfo);
-
- ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
- (uint32_t *)&input);
- if (ret)
- return -EINVAL;
-
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
- smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
- }
+ (smu->smu_table.boot_values.content_revision >= 2))
+ smu_v11_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
+ (uint8_t)SMU11_SYSPLL1_2_ID,
+ &smu->smu_table.boot_values.fclk);
return 0;
}
@@ -685,13 +598,13 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
address_high,
NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
address_low,
NULL);
@@ -702,15 +615,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_high = (uint32_t)upper_32_bits(address);
address_low = (uint32_t)lower_32_bits(address);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -718,96 +631,30 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
- ret = smu_check_powerplay_table(smu);
- return ret;
-}
-
-int smu_v11_0_parse_pptable(struct smu_context *smu)
-{
- int ret;
-
- struct smu_table_context *table_context = &smu->smu_table;
- struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
-
- /* during TDR we need to free and alloc the pptable */
- if (table_context->driver_pptable)
- kfree(table_context->driver_pptable);
-
- table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
-
- if (!table_context->driver_pptable)
- return -ENOMEM;
-
- ret = smu_store_powerplay_table(smu);
- if (ret)
- return -EINVAL;
-
- ret = smu_append_powerplay_table(smu);
-
- return ret;
-}
-
-int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
-{
- int ret;
-
- ret = smu_set_default_dpm_table(smu);
-
- return ret;
-}
-
-int smu_v11_0_write_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- int ret = 0;
-
- ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
- table_context->driver_pptable, true);
-
- return ret;
-}
-
-int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
- pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
+ dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
return ret;
}
-int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
-
- if (!table_context)
- return -EINVAL;
-
- return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
-}
-
int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
int ret = 0;
if (driver_table->mc_address) {
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(driver_table->mc_address),
NULL);
if (!ret)
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(driver_table->mc_address),
NULL);
@@ -821,16 +668,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
if (tool_table->mc_address) {
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
upper_32_bits(tool_table->mc_address),
NULL);
if (!ret)
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
lower_32_bits(tool_table->mc_address),
NULL);
@@ -842,14 +686,16 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
- if (amdgpu_sriov_vf(smu->adev))
+	/* Navy_Flounder does not support changing the display count currently */
+ if (adev->asic_type == CHIP_NAVY_FLOUNDER)
return 0;
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -860,21 +706,18 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
goto failed;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
feature_mask[0], NULL);
if (ret)
goto failed;
@@ -884,38 +727,6 @@ failed:
return ret;
}
-int smu_v11_0_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask, uint32_t num)
-{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
- return 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
- }
-
- return ret;
-}
-
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
@@ -923,7 +734,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
@@ -932,7 +743,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
bitmap_zero(feature->supported, feature->feature_num);
if (en) {
- ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
if (ret)
return ret;
@@ -949,15 +760,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
if (!smu->pm_enabled)
return ret;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -969,18 +777,20 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int ret = 0;
int clk_id;
- if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
- (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
+ if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
+ (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
- clk_id = smu_clk_get_index(smu, clock_select);
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clock_select);
if (clk_id < 0)
return -EINVAL;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
- pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
+ dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
@@ -988,10 +798,10 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return 0;
/* if DC limit is zero, return AC limit */
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
clk_id << 16, clock);
if (ret) {
- pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
+ dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
@@ -1000,17 +810,10 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
- struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
+ smu->smu_table.max_sustainable_clocks;
int ret = 0;
- if (!smu->smu_table.max_sustainable_clocks)
- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
- GFP_KERNEL);
- else
- max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
-
- smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
-
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
@@ -1018,34 +821,34 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->uclock),
SMU_UCLK);
if (ret) {
- pr_err("[%s] failed to get max UCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
__func__);
return ret;
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->soc_clock),
SMU_SOCCLK);
if (ret) {
- pr_err("[%s] failed to get max SOCCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
__func__);
return ret;
}
}
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
ret = smu_v11_0_get_max_sustainable_clock(smu,
&(max_sustainable_clocks->dcef_clock),
SMU_DCEFCLK);
if (ret) {
- pr_err("[%s] failed to get max DCEFCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
__func__);
return ret;
}
@@ -1054,7 +857,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
&(max_sustainable_clocks->display_clock),
SMU_DISPCLK);
if (ret) {
- pr_err("[%s] failed to get max DISPCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
__func__);
return ret;
}
@@ -1062,7 +865,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
&(max_sustainable_clocks->phy_clock),
SMU_PHYCLK);
if (ret) {
- pr_err("[%s] failed to get max PHYCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
__func__);
return ret;
}
@@ -1070,7 +873,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
&(max_sustainable_clocks->pixel_clock),
SMU_PIXCLK);
if (ret) {
- pr_err("[%s] failed to get max PIXCLK from SMC!",
+ dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
__func__);
return ret;
}
@@ -1082,190 +885,64 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
- uint32_t od_limit, max_power_limit;
- struct smu_11_0_powerplay_table *powerplay_table = NULL;
- struct smu_table_context *table_context = &smu->smu_table;
- powerplay_table = table_context->power_play_table;
-
- max_power_limit = smu_get_pptable_power_limit(smu);
-
- if (!max_power_limit) {
- // If we couldn't get the table limit, fall back on first-read value
- if (!smu->default_power_limit)
- smu->default_power_limit = smu->power_limit;
- max_power_limit = smu->default_power_limit;
- }
+int smu_v11_0_get_current_power_limit(struct smu_context *smu,
+ uint32_t *power_limit)
+{
+ int power_src;
+ int ret = 0;
- if (smu->od_enabled) {
- od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
+ return -EINVAL;
- pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
- max_power_limit *= (100 + od_limit);
- max_power_limit /= 100;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetPptLimit,
+ power_src << 16,
+ power_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
- return max_power_limit;
+ return ret;
}
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
- uint32_t max_power_limit;
-
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
- max_power_limit = smu_v11_0_get_max_power_limit(smu);
-
- if (n > max_power_limit) {
- pr_err("New power limit (%d) is over the max allowed %d\n",
- n,
- max_power_limit);
- return -EINVAL;
- }
- if (n == 0)
- n = smu->default_power_limit;
-
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
- pr_err("Setting new power limit is not supported!\n");
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
- pr_err("[%s] Set power limit Failed!\n", __func__);
+ dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
- smu->power_limit = n;
-
- return 0;
-}
-
-int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
- enum smu_clk_type clk_id,
- uint32_t *value)
-{
- int ret = 0;
- uint32_t freq = 0;
- int asic_clk_id;
-
- if (clk_id >= SMU_CLK_COUNT || !value)
- return -EINVAL;
-
- asic_clk_id = smu_clk_get_index(smu, clk_id);
- if (asic_clk_id < 0)
- return -EINVAL;
-
- /* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */
- if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
- ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
- else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (asic_clk_id << 16), &freq);
- if (ret)
- return ret;
- }
-
- freq *= 100;
- *value = freq;
-
- return ret;
-}
-
-static int smu_v11_0_set_thermal_range(struct smu_context *smu,
- struct smu_temperature_range range)
-{
- struct amdgpu_device *adev = smu->adev;
- int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
- int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
- uint32_t val;
- struct smu_table_context *table_context = &smu->smu_table;
- struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
-
- low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
- range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
- high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);
-
- if (low > high)
- return -EINVAL;
- val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
-
- WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+ smu->current_power_limit = n;
return 0;
}
-static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
+int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
- uint32_t val = 0;
-
- val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
- val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
- val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
-
- WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
+ if (smu->smu_table.thermal_controller_type)
+ return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
return 0;
}
-int smu_v11_0_start_thermal_control(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_temperature_range range;
- struct amdgpu_device *adev = smu->adev;
-
- memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
-
- ret = smu_get_thermal_temperature_range(smu, &range);
- if (ret)
- return ret;
-
- if (smu->smu_table.thermal_controller_type) {
- ret = smu_v11_0_set_thermal_range(smu, range);
- if (ret)
- return ret;
-
- ret = smu_v11_0_enable_thermal_alert(smu);
- if (ret)
- return ret;
-
- ret = smu_set_thermal_fan_table(smu);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.thermal.min_temp = range.min;
- adev->pm.dpm.thermal.max_temp = range.max;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
-
- return ret;
-}
-
-int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
-
- WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
-
- return 0;
+ return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
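Both helpers now defer to the amdgpu IRQ core: amdgpu_irq_get()/amdgpu_irq_put() keep a per-source enable refcount and, on the 0<->1 transitions, invoke the source's .set callback (smu_v11_0_set_irq_state() below) to program the THM/MP1 registers. A rough pairing sketch:

	/* refcount 0 -> 1: .set runs with AMDGPU_IRQ_STATE_ENABLE */
	smu_v11_0_enable_thermal_alert(smu);

	/* refcount 1 -> 0: .set runs with AMDGPU_IRQ_STATE_DISABLE */
	smu_v11_0_disable_thermal_alert(smu);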
static uint16_t convert_to_vddc(uint8_t vid)
@@ -1273,7 +950,7 @@ static uint16_t convert_to_vddc(uint8_t vid)
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
-static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
+int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
struct amdgpu_device *adev = smu->adev;
uint32_t vdd = 0, val_vid = 0;
@@ -1292,43 +969,6 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int smu_v11_0_read_sensor(struct smu_context *smu,
- enum amd_pp_sensors sensor,
- void *data, uint32_t *size)
-{
- int ret = 0;
-
- if(!data || !size)
- return -EINVAL;
-
- switch (sensor) {
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
- *size = 4;
- break;
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
- *size = 4;
- break;
- case AMDGPU_PP_SENSOR_VDDGFX:
- ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
- *size = 4;
- break;
- case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
- *(uint32_t *)data = 0;
- *size = 4;
- break;
- default:
- ret = smu_common_read_sensor(smu, sensor, data, size);
- break;
- }
-
- if (ret)
- *size = 0;
-
- return ret;
-}
-
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
@@ -1339,8 +979,8 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
case amd_pp_dcef_clock:
clk_select = SMU_DCEFCLK;
@@ -1358,7 +998,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
clk_select = SMU_UCLK;
break;
default:
- pr_info("[%s] Invalid Clock Type!", __func__);
+ dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
ret = -EINVAL;
break;
}
@@ -1369,7 +1009,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
+ ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1385,17 +1025,17 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
struct amdgpu_device *adev = smu->adev;
switch (adev->asic_type) {
- case CHIP_VEGA20:
- break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1407,7 +1047,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return AMD_FAN_CTRL_MANUAL;
else
return AMD_FAN_CTRL_AUTO;
@@ -1418,12 +1058,12 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
int ret = 0;
- if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+ if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
if (ret)
- pr_err("[%s]%s smc FAN CONTROL feature failed!",
+ dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
__func__, (auto_fan_control ? "Start" : "Stop"));
return ret;
@@ -1494,7 +1134,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
}
if (ret) {
- pr_err("[%s]Set fan control mode failed!", __func__);
+ dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
return -EINVAL;
}
@@ -1531,16 +1171,81 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- ret = smu_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
NULL);
return ret;
}
+static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+				       unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ struct smu_context *smu = &adev->smu;
+ uint32_t low, high;
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For THM irqs */
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For THM irqs */
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+ smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ smu->thermal_range.software_shutdown_temp);
+
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+ val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
+
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
- return smu_send_smc_msg(smu,
+ return smu_cmn_send_smc_msg(smu,
SMU_MSG_ReenableAcDcInterrupt,
NULL);
}
@@ -1554,6 +1259,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ struct smu_context *smu = &adev->smu;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
@@ -1605,6 +1311,14 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
break;
+ case 0x7:
+ if (!atomic_read(&adev->throttling_logging_enabled))
+ return 0;
+
+ if (__ratelimit(&adev->throttling_logging_rs))
+ schedule_work(&smu->throttling_logging_work);
+
+ break;
}
}
}
@@ -1614,24 +1328,17 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
+ .set = smu_v11_0_set_irq_state,
.process = smu_v11_0_irq_process,
};
int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- struct amdgpu_irq_src *irq_src = smu->irq_source;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
- /* already register */
- if (irq_src)
- return 0;
-
- irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
- if (!irq_src)
- return -ENOMEM;
- smu->irq_source = irq_src;
-
+ irq_src->num_types = 1;
irq_src->funcs = &smu_v11_0_irq_funcs;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
@@ -1696,14 +1403,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1719,8 +1426,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
/* Arcturus does not support this bit mask */
- if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
- !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
return true;
@@ -1757,21 +1464,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
- if (ras && ras->supported) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
- if (ret)
- goto out;
- }
-
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@ -1817,13 +1518,54 @@ int smu_v11_0_baco_exit(struct smu_context *smu)
return ret;
}
+int smu_v11_0_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ if (!ret)
+ msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
+ uint32_t clock_limit;
- clk_id = smu_clk_get_index(smu, clk_type);
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+	/* clock in MHz units */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0) {
ret = -EINVAL;
goto failed;
@@ -1831,13 +1573,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1846,147 +1588,187 @@ failed:
return ret;
}
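The division by 100 above works because the ATOM boot clocks are cached in 10 kHz units while this function reports MHz. A worked example with an assumed value:

	clock_limit = smu->smu_table.boot_values.uclk;	/* e.g. 87500 */
	*min = *max = clock_limit / 100;		/* -> 875 MHz */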
-int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
- clk_id = smu_clk_get_index(smu, clk_type);
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
if (clk_id < 0)
return clk_id;
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, false);
+
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
- return ret;
+ goto out;
}
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, true);
+
return ret;
}
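The message parameter packs the clock id into the high 16 bits and the frequency (in MHz) into the low 16 bits. A worked example with assumed values:

	/* clk_id = 2, max = 1850 MHz:
	 *   param = (2 << 16) | (1850 & 0xffff) = 0x0002073a
	 */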
-int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
+int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0;
- int ret;
-
- if (amdgpu_sriov_vf(smu->adev))
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
-
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+ int ret = 0, clk_id = 0;
+ uint32_t param;
- return ret;
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
-}
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
-int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- int ret = 0;
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
- if (initialize) {
- if (table_context->overdrive_table) {
- return -EINVAL;
- }
- table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
- if (!table_context->overdrive_table) {
- return -ENOMEM;
- }
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
- if (ret) {
- pr_err("Failed to export overdrive table!\n");
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
return ret;
- }
- if (!table_context->boot_overdrive_table) {
- table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
- if (!table_context->boot_overdrive_table) {
- return -ENOMEM;
- }
- }
}
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
- if (ret) {
- pr_err("Failed to import overdrive table!\n");
- return ret;
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
}
+
return ret;
}
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
+ struct smu_11_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_11_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_11_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_11_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
int ret = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
default:
- break;
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
}
+
+ /*
+ * Separate MCLK and SOCCLK soft min/max settings are not allowed
+ * on Arcturus.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ mclk_min = mclk_max = 0;
+ socclk_min = socclk_max = 0;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
@@ -1995,13 +1777,139 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
{
int pwr_source;
- pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ pwr_source = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ (uint32_t)power_src);
if (pwr_source < 0)
return -EINVAL;
- return smu_send_smc_msg_with_param(smu,
+ return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_NotifyPowerSource,
pwr_source,
NULL);
}
+int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ /*
+	 * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
+	 * We do not support it currently, so mask it off.
+ */
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ return smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ 0xff,
+ value);
+}
+
+int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_11_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v11_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
+
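A hedged sketch of a typical caller populating one table during DPM setup (the ASIC-specific set_default_dpm_table implementations do something along these lines):

	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;

	ret = smu_v11_0_set_single_dpm_table(smu, SMU_GFXCLK, gfx_table);
	if (ret)
		return ret;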
+int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min_value,
+ uint32_t *max_value)
+{
+ uint32_t level_count = 0;
+ int ret = 0;
+
+ if (!min_value && !max_value)
+ return -EINVAL;
+
+ if (min_value) {
+ /* by default, level 0 clock value as min value */
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ 0,
+ min_value);
+ if (ret)
+ return ret;
+ }
+
+ if (max_value) {
+ ret = smu_v11_0_get_dpm_level_count(smu,
+ clk_type,
+ &level_count);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ level_count - 1,
+ max_value);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
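A short usage sketch, assuming SOCCLK DPM is enabled (message and values hypothetical):

	uint32_t min_mhz = 0, max_mhz = 0;

	ret = smu_v11_0_get_dpm_level_range(smu, SMU_SOCCLK,
					    &min_mhz, &max_mhz);
	if (!ret)
		dev_dbg(smu->adev->dev, "SOCCLK DPM range: %u..%u MHz\n",
			min_mhz, max_mhz);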