Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--  drivers/gpu/drm/amd/pm/Makefile | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 2498
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c | 95
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 618
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 373
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/Makefile | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c) | 37
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_smc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 1081
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h | 38
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h (renamed from drivers/gpu/drm/amd/pm/powerplay/ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c (renamed from drivers/gpu/drm/amd/pm/powerplay/si_dpm.c) | 188
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/si_dpm.h) | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c (renamed from drivers/gpu/drm/amd/pm/powerplay/si_smc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h (renamed from drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 400
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h (renamed from drivers/gpu/drm/amd/pm/inc/amd_powerplay.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h (renamed from drivers/gpu/drm/amd/pm/inc/hardwaremanager.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h (renamed from drivers/gpu/drm/amd/pm/inc/hwmgr.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h (renamed from drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h (renamed from drivers/gpu/drm/amd/pm/inc/power_state.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_debug.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_endian.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_thermal.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h (renamed from drivers/gpu/drm/amd/pm/inc/ppinterrupt.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h (renamed from drivers/gpu/drm/amd/pm/inc/smu10.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h (renamed from drivers/gpu/drm/amd/pm/inc/smu71.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu71_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h (renamed from drivers/gpu/drm/amd/pm/inc/smu72.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu72_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h (renamed from drivers/gpu/drm/amd/pm/inc/smu73.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu73_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h (renamed from drivers/gpu/drm/amd/pm/inc/smu74.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu74_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h (renamed from drivers/gpu/drm/amd/pm/inc/smu75.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu75_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_common.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h (renamed from drivers/gpu/drm/amd/pm/inc/smu8.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h (renamed from drivers/gpu/drm/amd/pm/inc/smu8_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h (renamed from drivers/gpu/drm/amd/pm/inc/smu9.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h (renamed from drivers/gpu/drm/amd/pm/inc/smumgr.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 677
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h (renamed from drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h) | 59
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h) | 24
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h (renamed from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h (renamed from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_types.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v12_0.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 102
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 70
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 476
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 358
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 79
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 90
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 47
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 222
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 10
114 files changed, 4475 insertions, 3412 deletions
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
index 8cf6eff1ea93..51751db436b0 100644
--- a/drivers/gpu/drm/amd/pm/Makefile
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -21,26 +21,29 @@
#
subdir-ccflags-y += \
- -I$(FULL_AMD_PATH)/pm/inc/ \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
+ -I$(FULL_AMD_PATH)/pm/inc/ \
-I$(FULL_AMD_PATH)/pm/swsmu \
+ -I$(FULL_AMD_PATH)/pm/swsmu/inc \
+ -I$(FULL_AMD_PATH)/pm/swsmu/inc/pmfw_if \
-I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu13 \
- -I$(FULL_AMD_PATH)/pm/powerplay \
+ -I$(FULL_AMD_PATH)/pm/powerplay/inc \
-I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
+ -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \
+ -I$(FULL_AMD_PATH)/pm/legacy-dpm
AMD_PM_PATH = ../pm
-PM_LIBS = swsmu powerplay
+PM_LIBS = swsmu powerplay legacy-dpm
AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS)))
include $(AMD_PM)
-PM_MGR = amdgpu_dpm.o amdgpu_pm.o
+PM_MGR = amdgpu_dpm.o amdgpu_pm.o amdgpu_dpm_internal.o
AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR))
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 08362d506534..1d63f1e8884c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -31,896 +31,41 @@
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
+#include "amdgpu_smu.h"
-#define WIDTH_4K 3840
+#define amdgpu_dpm_enable_bapm(adev, e) \
+ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
-{
- const char *s;
-
- switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- default:
- s = "none";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- s = "battery";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- s = "balanced";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- s = "performance";
- break;
- }
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
- if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
- (class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_cap_info(u32 caps)
-{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
-{
- struct drm_device *ddev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
-
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vblank_in_pixels =
- amdgpu_crtc->hw_mode.crtc_htotal *
- (amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2));
-
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
-
- return vblank_time_us;
-}
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
- }
- }
- }
-
- return vrefresh;
-}
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
-{
- switch (sensor) {
- case THERMAL_TYPE_RV6XX:
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_SUMO:
- case THERMAL_TYPE_NI:
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_CI:
- case THERMAL_TYPE_KV:
- return true;
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- return false; /* need special handling */
- case THERMAL_TYPE_NONE:
- case THERMAL_TYPE_EXTERNAL:
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
- return false;
- }
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
-{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- int i;
- ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
-
- amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
- if (!amdgpu_table->entries)
- return -ENOMEM;
-
- entry = &atom_table->entries[0];
- for (i = 0; i < atom_table->ucNumEntries; i++) {
- amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
- (entry->ucClockHigh << 16);
- amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
- }
- amdgpu_table->count = atom_table->ucNumEntries;
-
- return 0;
-}
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
-
- return 0;
-}
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- union fan_info *fan_info;
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- int ret, i;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- /* fan table */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- if (power_info->pplib3.usFanTableOffset) {
- fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usFanTableOffset));
- adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
- adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
- adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
- adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
- adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
- adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
- adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
- if (fan_info->fan.ucFanTableFormat >= 2)
- adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
- else
- adev->pm.dpm.fan.t_max = 10900;
- adev->pm.dpm.fan.cycle_delay = 100000;
- if (fan_info->fan.ucFanTableFormat >= 3) {
- adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
- adev->pm.dpm.fan.default_max_fan_pwm =
- le16_to_cpu(fan_info->fan3.usFanPWMMax);
- adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
- adev->pm.dpm.fan.fan_output_sensitivity =
- le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
- }
- adev->pm.dpm.fan.ucode_fan_control = true;
- }
- }
-
- /* clock dependancy tables, shedding tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
- if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
- ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
- (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
- if (clk_v->ucNumEntries) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
- le16_to_cpu(clk_v->entries[0].usSclkLow) |
- (clk_v->entries[0].ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
- le16_to_cpu(clk_v->entries[0].usMclkLow) |
- (clk_v->entries[0].ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
- le16_to_cpu(clk_v->entries[0].usVddc);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
- le16_to_cpu(clk_v->entries[0].usVddci);
- }
- }
- if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
- ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
- (ATOM_PPLIB_PhaseSheddingLimits_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
- ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
-
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kcalloc(psl->ucNumEntries,
- sizeof(struct amdgpu_phase_shedding_limits_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
-
- entry = &psl->entries[0];
- for (i = 0; i < psl->ucNumEntries; i++) {
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
- }
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
- psl->ucNumEntries;
- }
- }
-
- /* cac data */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
- adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
- adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
- adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
- adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
- if (adev->pm.dpm.tdp_od_limit)
- adev->pm.dpm.power_control = true;
- else
- adev->pm.dpm.power_control = false;
- adev->pm.dpm.tdp_adjustment = 0;
- adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
- adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
- adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
- if (power_info->pplib5.usCACLeakageTableOffset) {
- ATOM_PPLIB_CAC_Leakage_Table *cac_table =
- (ATOM_PPLIB_CAC_Leakage_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
- ATOM_PPLIB_CAC_Leakage_Record *entry;
- u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- entry = &cac_table->entries[0];
- for (i = 0; i < cac_table->ucNumEntries; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
- le16_to_cpu(entry->usVddc1);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
- le16_to_cpu(entry->usVddc2);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
- le16_to_cpu(entry->usVddc3);
- } else {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(entry->usVddc);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(entry->ulLeakageValue);
- }
- entry = (ATOM_PPLIB_CAC_Leakage_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
- }
- adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
- }
- }
-
- /* ext tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
- ext_hdr->usVCETableOffset) {
- VCEClockInfoArray *array = (VCEClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + array->ucNumEntries * sizeof(VCEClockInfo));
- ATOM_PPLIB_VCE_State_Table *states =
- (ATOM_PPLIB_VCE_State_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
- 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
- ATOM_PPLIB_VCE_State_Record *state_entry;
- VCEClockInfo *vce_clk;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- state_entry = &states->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
- }
- adev->pm.dpm.num_of_vce_states =
- states->numEntries > AMD_MAX_VCE_LEVELS ?
- AMD_MAX_VCE_LEVELS : states->numEntries;
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.vce_states[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.vce_states[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.vce_states[i].clk_idx =
- state_entry->ucClockInfoIndex & 0x3f;
- adev->pm.dpm.vce_states[i].pstate =
- (state_entry->ucClockInfoIndex & 0xc0) >> 6;
- state_entry = (ATOM_PPLIB_VCE_State_Record *)
- ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
- ext_hdr->usUVDTableOffset) {
- UVDClockInfoArray *array = (UVDClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- UVDClockInfo *uvd_clk = (UVDClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
- le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
- le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
- ext_hdr->usSAMUTableOffset) {
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
- ext_hdr->usPPMTableOffset) {
- ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPPMTableOffset));
- adev->pm.dpm.dyn_state.ppm_table =
- kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
- adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
- le16_to_cpu(ppm->usCpuCoreNumber);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
- le32_to_cpu(ppm->ulPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
- le32_to_cpu(ppm->ulSmallACPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
- le32_to_cpu(ppm->ulPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
- le32_to_cpu(ppm->ulSmallACPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
- le32_to_cpu(ppm->ulApuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
- le32_to_cpu(ppm->ulDGpuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
- le32_to_cpu(ppm->ulDGpuUlvPower);
- adev->pm.dpm.dyn_state.ppm_table->tj_max =
- le32_to_cpu(ppm->ulTjmax);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
- ext_hdr->usACPTableOffset) {
- ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
- ext_hdr->usPowerTuneTableOffset) {
- u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- ATOM_PowerTune_Table *pt;
- adev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- if (rev > 0) {
- ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
- ppt->usMaximumPowerDeliveryLimit;
- pt = &ppt->power_tune_table;
- } else {
- ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
- pt = &ppt->power_tune_table;
- }
- adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
- le16_to_cpu(pt->usConfigurableTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
- adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
- le16_to_cpu(pt->usBatteryPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
- le16_to_cpu(pt->usSmallPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
- le16_to_cpu(pt->usLowCACLeakage);
- adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
- le16_to_cpu(pt->usHighCACLeakage);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
- ext_hdr->usSclkVddgfxTableOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(
- &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
- dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
-
- kfree(dyn_state->vddc_dependency_on_sclk.entries);
- kfree(dyn_state->vddci_dependency_on_mclk.entries);
- kfree(dyn_state->vddc_dependency_on_mclk.entries);
- kfree(dyn_state->mvdd_dependency_on_mclk.entries);
- kfree(dyn_state->cac_leakage_table.entries);
- kfree(dyn_state->phase_shedding_limits_table.entries);
- kfree(dyn_state->ppm_table);
- kfree(dyn_state->cac_tdp_table);
- kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
- kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
- kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
- kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
- kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
-}
-
-static const char *pp_lib_thermal_controller_names[] = {
- "NONE",
- "lm63",
- "adm1032",
- "adm1030",
- "max6649",
- "lm64",
- "f75375",
- "RV6xx",
- "RV770",
- "adt7473",
- "NONE",
- "External GPIO",
- "Evergreen",
- "emc2103",
- "Sumo",
- "Northern Islands",
- "Southern Islands",
- "lm96163",
- "Sea Islands",
- "Kaveri/Kabini",
-};
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- ATOM_PPLIB_POWERPLAYTABLE *power_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- ATOM_PPLIB_THERMALCONTROLLER *controller;
- struct amdgpu_i2c_bus_rec i2c_bus;
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return;
- power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
- (mode_info->atom_context->bios + data_offset);
- controller = &power_table->sThermalController;
-
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
- adev->pm.no_fan = true;
- adev->pm.fan_pulses_per_revolution =
- controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
- if (adev->pm.fan_pulses_per_revolution) {
- adev->pm.fan_min_rpm = controller->ucFanMinRPM;
- adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
- }
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_NI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_CI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
- } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
- i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
- adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
- if (adev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
- }
- } else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
- controller->ucType,
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- }
- }
-}
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
- (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
- (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!pp_funcs->get_sclk)
+ return 0;
- if (idx < adev->pm.dpm.num_of_vce_states)
- return &adev->pm.dpm.vce_states[idx];
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
+ low);
+ mutex_unlock(&adev->pm.mutex);
- return NULL;
+ return ret;
}
-int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
-}
+ if (!pp_funcs->get_mclk)
+ return 0;
-int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
+ low);
+ mutex_unlock(&adev->pm.mutex);
- return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+ return ret;
}
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
@@ -935,52 +80,20 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return 0;
}
+ mutex_lock(&adev->pm.mutex);
+
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
- if (pp_funcs && pp_funcs->set_powergating_by_smu) {
- /*
- * TODO: need a better lock mechanism
- *
- * Here adev->pm.mutex lock protection is enforced on
- * UVD and VCE cases only. Since for other cases, there
- * may be already lock protection in amdgpu_pm.c.
- * This is a quick fix for the deadlock issue below.
- * NFO: task ocltst:2028 blocked for more than 120 seconds.
- * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
- * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
- * cltst D 0 2028 2026 0x00000000
- * all Trace:
- * __schedule+0x2c0/0x870
- * schedule+0x2c/0x70
- * schedule_preempt_disabled+0xe/0x10
- * __mutex_lock.isra.9+0x26d/0x4e0
- * __mutex_lock_slowpath+0x13/0x20
- * ? __mutex_lock_slowpath+0x13/0x20
- * mutex_lock+0x2f/0x40
- * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
- * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
- * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
- * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
- * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
- * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
- */
- mutex_lock(&adev->pm.mutex);
- ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- mutex_unlock(&adev->pm.mutex);
- }
- break;
case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- if (pp_funcs && pp_funcs->set_powergating_by_smu) {
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
- }
break;
default:
break;
@@ -989,6 +102,8 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1001,9 +116,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1016,9 +135,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1029,9 +152,13 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (pp_funcs && pp_funcs->set_mp1_state) {
+ mutex_lock(&adev->pm.mutex);
+
ret = pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
mp1_state);
+
+ mutex_unlock(&adev->pm.mutex);
}
return ret;
@@ -1042,25 +169,37 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
bool baco_cap;
+ int ret = 0;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
- if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
- return false;
+ mutex_lock(&adev->pm.mutex);
- return baco_cap;
+ ret = pp_funcs->get_asic_baco_capability(pp_handle,
+ &baco_cap);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret ? false : baco_cap;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
+ int ret = 0;
if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
return -ENOENT;
- return pp_funcs->asic_reset_mode_2(pp_handle);
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->asic_reset_mode_2(pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
}
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
@@ -1072,37 +211,47 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
if (ret)
- return ret;
+ goto out;
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- if (ret)
- return ret;
- return 0;
+out:
+ mutex_unlock(&adev->pm.mutex);
+ return ret;
}
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_mode1_reset = false;
- if (is_support_sw_smu(adev))
- return smu_mode1_reset_is_support(smu);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_mode1_reset = smu_mode1_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return false;
+ return support_mode1_reset;
}
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
- if (is_support_sw_smu(adev))
- return smu_mode1_reset(smu);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_mode1_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return -EOPNOTSUPP;
+ return ret;
}
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
@@ -1115,9 +264,12 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- if (pp_funcs && pp_funcs->switch_power_profile)
+ if (pp_funcs && pp_funcs->switch_power_profile) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->switch_power_profile(
adev->powerplay.pp_handle, type, en);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1128,9 +280,12 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->set_xgmi_pstate)
+ if (pp_funcs && pp_funcs->set_xgmi_pstate) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
pstate);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1142,20 +297,27 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- if (pp_funcs && pp_funcs->set_df_cstate)
+ if (pp_funcs && pp_funcs->set_df_cstate) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
- if (is_support_sw_smu(adev))
- return smu_allow_xgmi_power_down(smu, en);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_allow_xgmi_power_down(smu, en);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return 0;
+ return ret;
}
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
@@ -1165,8 +327,11 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+ if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1179,9 +344,12 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->set_clockgating_by_smu)
+ if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_clockgating_by_smu(pp_handle,
msg_id);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1194,9 +362,12 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
adev->powerplay.pp_funcs;
int ret = -EOPNOTSUPP;
- if (pp_funcs && pp_funcs->smu_i2c_bus_access)
+ if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->smu_i2c_bus_access(pp_handle,
acquire);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1209,13 +380,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
+
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
- mutex_unlock(&adev->pm.mutex);
if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
+ smu_set_ac_dc(adev->powerplay.pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
}
}
@@ -1223,394 +396,1219 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
void *data, uint32_t *size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int ret = 0;
+ int ret = -EINVAL;
if (!data || !size)
return -EINVAL;
- if (pp_funcs && pp_funcs->read_sensor)
- ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
- sensor, data, size);
- else
- ret = -EINVAL;
+ if (pp_funcs && pp_funcs->read_sensor) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ sensor,
+ data,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
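
The sensor wrapper now defaults to -EINVAL and validates both out-parameters before any lock is taken; callers pass a typed buffer together with its size, the same calling convention the old thermal handler in this file used. A hedged stand-in (read_sensor and the 65 C value are invented for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for amdgpu_dpm_read_sensor(): reject NULL or
 * undersized out-parameters before doing any work. */
static int read_sensor(int sensor, void *data, uint32_t *size)
{
	int fake_temp_mc = 65000;	/* 65 C in millidegrees, made up */

	(void)sensor;
	if (!data || !size || *size < sizeof(int))
		return -EINVAL;
	memcpy(data, &fake_temp_mc, sizeof(int));
	*size = sizeof(int);
	return 0;
}

int main(void)
{
	int temp;
	uint32_t size = sizeof(temp);

	if (!read_sensor(0, &temp, &size))
		printf("GPU temp: %d mC\n", temp);
	return 0;
}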
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
- int temp, size = sizeof(temp);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!adev->pm.dpm_enabled)
return;
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size)) {
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
+ if (!pp_funcs->pm_compute_clocks)
+ return;
+
mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
+ pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
+}
+
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
- amdgpu_pm_compute_clocks(adev);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ int ret = 0;
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
- /* certain older asics have a separare 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
- return NULL;
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
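
All three enable helpers reduce to the same inversion: enabling a block means un-gating it, so !enable is what reaches the powergating call. A toy model of that inversion (set_powergating/enable_block are illustrative names only):

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: the gate flag is the inverse of the enable flag. */
static int set_powergating(bool gate)
{
	printf("block is now %s\n", gate ? "power-gated" : "ungated");
	return 0;
}

static void enable_block(bool enable)
{
	if (set_powergating(!enable))
		fprintf(stderr, "DPM %s failed\n",
			enable ? "enable" : "disable");
}

int main(void)
{
	enable_block(true);	/* un-gates the block */
	enable_block(false);	/* gates it again */
	return 0;
}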
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal = false;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int r = 0;
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
+ if (!pp_funcs || !pp_funcs->load_firmware)
+ return 0;
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
+ mutex_lock(&adev->pm.mutex);
+ r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ goto out;
}
- dpm_state = adev->pm.dpm.state;
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
+ if (smu_version)
+ *smu_version = adev->pm.fw_version;
- if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+out:
+ mutex_unlock(&adev->pm.mutex);
+ return r;
+}
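
load_smu_firmware adopts the kernel's usual single-unlock error path: take the lock, jump to one out label on failure, and only publish the firmware version on success. A runnable sketch of that shape, with do_load and the 0x2a version as placeholders:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_load(void) { return 0; }	/* stand-in for the backend call */

static int load_firmware(unsigned int *version)
{
	int r;

	pthread_mutex_lock(&lock);
	r = do_load();
	if (r) {
		fprintf(stderr, "firmware loading failed\n");
		goto out;
	}
	if (version)
		*version = 0x2a;	/* placeholder for adev->pm.fw_version */
out:
	pthread_mutex_unlock(&lock);
	return r;
}

int main(void)
{
	unsigned int v = 0;

	printf("r=%d version=0x%x\n", load_firmware(&v), v);
	return 0;
}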
+
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
+ enable);
+ mutex_unlock(&adev->pm.mutex);
}
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
- if (adev->powerplay.pp_funcs->display_configuration_changed)
- amdgpu_dpm_display_configuration_changed(adev);
+ return ret;
+}
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->check_state_equal) {
- if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
- equal = false;
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_hbm_bad_pages_num(smu, size);
+ mutex_unlock(&adev->pm.mutex);
- if (equal)
- return;
+ return ret;
+}
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->powerplay.pp_funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+
+ if (type != PP_SCLK)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
+ SMU_SCLK,
+ min,
+ max);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t min,
+ uint32_t max)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (type != PP_SCLK)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_soft_freq_range(smu,
+ SMU_SCLK,
+ min,
+ max);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
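
Both frequency-range wrappers accept only PP_SCLK and translate it to the SMU-side SMU_SCLK identifier before forwarding; any other clock type bounces with -EINVAL before the lock is taken. A sketch of that validate-and-translate guard (the enums and MHz values are invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum pub_clock { PUB_SCLK, PUB_MCLK };	/* models pp_clock_type */

/* Only the public SCLK type is accepted; a real driver would forward
 * the internal SMU-side id from here. */
static int get_freq_range(enum pub_clock type, uint32_t *min, uint32_t *max)
{
	if (type != PUB_SCLK)
		return -EINVAL;
	*min = 300;	/* made-up MHz bounds */
	*max = 2100;
	return 0;
}

int main(void)
{
	uint32_t lo, hi;

	if (!get_freq_range(PUB_SCLK, &lo, &hi))
		printf("sclk: %u..%u MHz\n", lo, hi);
	printf("mclk query: %d\n", get_freq_range(PUB_MCLK, &lo, &hi));
	return 0;
}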
+
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_write_watermarks_table(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
+ enum smu_event_type event,
+ uint64_t event_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_wait_for_event(smu, event, event_arg);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_status_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ return atomic64_read(&smu->throttle_int_counter);
+}
+
+/**
+ * amdgpu_dpm_gfx_state_change - Handle gfx power state change
+ * @adev: amdgpu_device pointer
+ * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
+ */
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+ enum gfx_change_state state)
+{
+ mutex_lock(&adev->pm.mutex);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->gfx_state_change_set)
+ ((adev)->powerplay.pp_funcs->gfx_state_change_set(
+ (adev)->powerplay.pp_handle, state));
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+ void *umc_ecc)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ return smu_get_ecc_info(smu, umc_ecc);
+}
+
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+ uint32_t idx)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amd_vce_state *vstate = NULL;
+
+ if (!pp_funcs->get_vce_clock_state)
+ return NULL;
+
+ mutex_lock(&adev->pm.mutex);
+ vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
+ idx);
+ mutex_unlock(&adev->pm.mutex);
+
+ return vstate;
+}
+
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type *state)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (!pp_funcs->get_current_power_state) {
+ *state = adev->pm.dpm.user_state;
+ goto out;
}
+
+ *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
+ if (*state < POWER_STATE_TYPE_DEFAULT ||
+ *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
+ *state = adev->pm.dpm.user_state;
+
+out:
+ mutex_unlock(&adev->pm.mutex);
}
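
The getter treats the backend's answer as untrusted: anything outside the POWER_STATE_TYPE_DEFAULT..POWER_STATE_TYPE_INTERNAL_3DPERF range is replaced with the last user-requested state. A compact model of that validate-or-fallback check (the enum values are illustrative):

#include <stdio.h>

enum pm_state { STATE_DEFAULT, STATE_BATTERY, STATE_PERF, STATE_3DPERF };

/* A backend answer outside the known enum range is discarded in favour
 * of the state the user last asked for. */
static enum pm_state current_state(int raw, enum pm_state user_state)
{
	if (raw < STATE_DEFAULT || raw > STATE_3DPERF)
		return user_state;
	return (enum pm_state)raw;
}

int main(void)
{
	printf("%d\n", current_state(99, STATE_BATTERY));	  /* falls back: 1 */
	printf("%d\n", current_state(STATE_PERF, STATE_BATTERY)); /* kept: 2 */
	return 0;
}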
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type state)
{
- int i = 0;
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.user_state = state;
+ mutex_unlock(&adev->pm.mutex);
- if (!adev->pm.dpm_enabled)
+ if (is_support_sw_smu(adev))
return;
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ &state) == -EOPNOTSUPP)
+ amdgpu_dpm_compute_clocks(adev);
+}
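
amdgpu_dpm_set_power_state leans on dispatch_task's new contract: a missing dispatch_tasks hook surfaces as -EOPNOTSUPP, which the caller converts into a local clock recomputation rather than a hard error. The same probe-then-fallback idiom reappears below in set_sclk_od and set_mclk_od; a distilled version (dispatch_task/compute_clocks are stand-ins, not driver symbols):

#include <errno.h>
#include <stdio.h>

static int dispatch_task(int have_hook)
{
	return have_hook ? 0 : -EOPNOTSUPP;	/* no dispatch_tasks hook */
}

static void compute_clocks(void)
{
	puts("no dispatcher: recomputing clocks locally");
}

int main(void)
{
	if (dispatch_task(0) == -EOPNOTSUPP)
		compute_clocks();
	return 0;
}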
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum amd_dpm_forced_level level;
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
- mutex_unlock(&adev->pm.mutex);
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->get_performance_level)
+ level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
+ else
+ level = adev->pm.dpm.forced_level;
+ mutex_unlock(&adev->pm.mutex);
+
+ return level;
+}
+
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amd_dpm_forced_level level)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum amd_dpm_forced_level current_level;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (!pp_funcs->force_performance_level)
+ return 0;
+
+ if (adev->pm.dpm.thermal_active)
+ return -EINVAL;
+
+ current_level = amdgpu_dpm_get_performance_level(adev);
+ if (current_level == level)
+ return 0;
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, false);
+ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, true);
}
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
+ }
+
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask)) {
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ } else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask)) {
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
+ level)) {
mutex_unlock(&adev->pm.mutex);
+ return -EINVAL;
}
+
+ adev->pm.dpm.forced_level = level;
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
}
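
The profile-mode mask works because the AMD_DPM_FORCED_LEVEL_* values are bit flags, so a single AND decides whether a level is a UMD profile state; entering any profile level from a non-profile one ungates GFX first, and leaving re-gates it. A sketch with invented flag values (the driver's actual encodings differ):

#include <stdio.h>

#define LEVEL_MANUAL           (1 << 0)
#define LEVEL_PROFILE_STANDARD (1 << 1)
#define LEVEL_PROFILE_MIN_SCLK (1 << 2)
#define LEVEL_PROFILE_MIN_MCLK (1 << 3)
#define LEVEL_PROFILE_PEAK     (1 << 4)

#define PROFILE_MODE_MASK (LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | \
			   LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK)

/* Crossing the profile boundary in either direction is what toggles
 * the GFX gating around the backend call. */
static void transition(unsigned int cur, unsigned int next)
{
	if (!(cur & PROFILE_MODE_MASK) && (next & PROFILE_MODE_MASK))
		puts("enter UMD pstate: ungate GFX power/clock gating");
	else if ((cur & PROFILE_MODE_MASK) && !(next & PROFILE_MODE_MASK))
		puts("exit UMD pstate: re-gate GFX power/clock gating");
}

int main(void)
{
	transition(LEVEL_MANUAL, LEVEL_PROFILE_PEAK);
	transition(LEVEL_PROFILE_PEAK, LEVEL_MANUAL);
	return 0;
}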
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+ struct pp_states_info *states)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
+ if (!pp_funcs->get_pp_num_states)
+ return -EOPNOTSUPP;
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
+ states);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->dispatch_tasks)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
+ task_id,
+ user_state);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_pp_table)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fine_grain_clk_vol)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
+ type,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->odn_edit_dpm_table)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
+ type,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->print_clock_levels)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
+ type,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf,
+ int *offset)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->emit_clock_levels)
+ return -ENOENT;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
+ type,
+ buf,
+ offset);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+ uint64_t ppfeature_masks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_ppfeature_status)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
+ ppfeature_masks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_ppfeature_status)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t mask)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->force_clock_level)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
+ type,
+ mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_sclk_od)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (is_support_sw_smu(adev))
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->set_sclk_od)
+ pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL) == -EOPNOTSUPP) {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_dpm_compute_clocks(adev);
}
+
+ return 0;
}
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
+ if (!pp_funcs->get_mclk_od)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (is_support_sw_smu(adev))
+ return 0;
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->set_mclk_od)
+ pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL) == -EOPNOTSUPP) {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_dpm_compute_clocks(adev);
}
+
+ return 0;
}
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+ char *buf)
{
- int i;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->print_power_state == NULL)
- return;
+ if (!pp_funcs->get_power_profile_mode)
+ return -EOPNOTSUPP;
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+ return ret;
}
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+ long *input, uint32_t size)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
- if (ret)
- DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ if (!pp_funcs->set_power_profile_mode)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
}
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_gpu_metrics)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t *fan_mode)
{
- int r;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("smu firmware loading failed\n");
- return r;
- }
+ if (!pp_funcs->get_fan_control_mode)
+ return -EOPNOTSUPP;
- if (smu_version)
- *smu_version = adev->pm.fw_version;
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
+ fan_mode);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_speed_pwm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t *speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_fan_speed_pwm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t *speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_fan_speed_rpm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_speed_rpm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t mode)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_control_mode)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
+ mode);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+ uint32_t *limit,
+ enum pp_power_limit_level pp_limit_level,
+ enum pp_power_type power_type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_power_limit)
+ return -ENODATA;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+ limit,
+ pp_limit_level,
+ power_type);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_power_limit)
+ return -EINVAL;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
+ limit);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
+{
+ bool cclk_dpm_supported = false;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ cclk_dpm_supported = is_support_cclk_dpm(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return (int)cclk_dpm_supported;
+}
+
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->debugfs_print_current_performance_level)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
+ m);
+ mutex_unlock(&adev->pm.mutex);
return 0;
}
+
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ void **addr,
+ size_t *size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_smu_prv_buf_details)
+ return -ENOSYS;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
+ addr,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
+{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if ((is_support_sw_smu(adev) &&
+ (smu->od_enabled || smu->is_apu)) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ return true;
+
+ return false;
+}
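
amdgpu_dpm_is_overdrive_supported shows the cost of the opaque handle: the same pp_handle is interpreted as a struct smu_context or a struct pp_hwmgr depending on is_support_sw_smu(), so the predicate must branch before dereferencing. A tagged-handle model of the check (the struct names are stand-ins, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct smu_like   { bool od_enabled; bool is_apu; };
struct hwmgr_like { bool od_enabled; };

/* One opaque pointer, two possible layouts: the tag decides which
 * backend type to cast to before reading any field. */
static bool overdrive_supported(bool sw_smu, void *handle)
{
	if (sw_smu) {
		struct smu_like *smu = handle;
		return smu->od_enabled || smu->is_apu;
	} else {
		struct hwmgr_like *hwmgr = handle;
		return hwmgr->od_enabled;
	}
}

int main(void)
{
	struct smu_like smu = { .od_enabled = false, .is_apu = true };

	printf("%d\n", overdrive_supported(true, &smu));	/* 1: APUs qualify */
	return 0;
}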
+
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+ const char *buf,
+ size_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_pp_table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
+ buf,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!is_support_sw_smu(adev))
+ return INT_MAX;
+
+ return smu->cpu_core_num;
+}
+
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
+{
+ if (!is_support_sw_smu(adev))
+ return;
+
+ amdgpu_smu_stb_debug_fs_init(adev);
+}
+
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+ const struct amd_pp_display_configuration *input)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_configuration_change)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
+ input);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_display_mode_validation_clocks)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type_with_latency)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type_with_voltage)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+ void *clock_ranges)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_watermarks_for_clocks_ranges)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
+ clock_ranges);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+ struct pp_display_clock_request *clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_clock_voltage_request)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+ struct amd_pp_clock_info *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_current_clocks)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+ uint32_t count)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_active_display_count)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
+ count);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_min_deep_sleep_dcefclk)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->set_hard_min_dcefclk_by_freq)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->set_hard_min_fclk_by_freq)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+ bool disable_memory_clock_switch)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_disable_memory_clock_switch)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
+ disable_memory_clock_switch);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_max_sustainable_clocks_by_dc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
+ max_clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_uclk_dpm_states)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
+ clock_values_in_khz,
+ num_states);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+ struct dpm_clocks *clock_table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_dpm_clock_table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
+ clock_table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
new file mode 100644
index 000000000000..42efe838fa85
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include "amdgpu_smu.h"
+#include "amdgpu_dpm_internal.h"
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ adev->pm.dpm.new_active_crtcs = 0;
+ adev->pm.dpm.new_active_crtc_count = 0;
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (amdgpu_crtc->enabled) {
+ adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+ adev->pm.dpm.new_active_crtc_count++;
+ }
+ }
+ }
+}
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+ u32 vblank_in_pixels;
+ u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+ break;
+ }
+ }
+ }
+
+ return vblank_time_us;
+}
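
The vblank time falls out of two facts: the blank period is htotal * (vblank_end - vdisplay + 2 * v_border) pixels long, and hw_mode.clock is a pixel clock in kHz, so pixels * 1000 / clock yields microseconds. Worked through with made-up 1080p-style timings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative 1080p60-like timings, not read from any hardware */
	uint32_t htotal = 2200, vblank_end = 1125, vdisplay = 1080;
	uint32_t v_border = 0, clock_khz = 148500;

	uint32_t vblank_in_pixels =
		htotal * (vblank_end - vdisplay + 2 * v_border);
	/* pixels / (1000 * pixels/ms) = ms, i.e. *1000/kHz gives us */
	uint32_t vblank_time_us = vblank_in_pixels * 1000 / clock_khz;

	printf("vblank: %u pixels, %u us\n", vblank_in_pixels, vblank_time_us);
	return 0;	/* prints 99000 pixels, 666 us */
}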
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+ u32 vrefresh = 0;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ break;
+ }
+ }
+ }
+
+ return vrefresh;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e2cae97f4ff1..541c9f237e9c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -34,7 +34,6 @@
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
-#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
@@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_pm_state_type pm;
int ret;
@@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
return ret;
}
- if (pp_funcs->get_current_power_state) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- } else {
- pm = adev->pm.dpm.user_state;
- }
+ amdgpu_dpm_get_current_power_state(adev, &pm);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
- } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
+ amdgpu_dpm_set_power_state(adev, state);
- amdgpu_pm_compute_clocks(adev);
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_performance_level)
- level = amdgpu_dpm_get_performance_level(adev);
- else
- level = adev->pm.dpm.forced_level;
+ level = amdgpu_dpm_get_performance_level(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -318,9 +298,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
int ret = 0;
if (amdgpu_in_reset(adev))
@@ -358,57 +336,17 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (pp_funcs->get_performance_level)
- current_level = amdgpu_dpm_get_performance_level(adev);
- else
- current_level = adev->pm.dpm.forced_level;
-
- if (current_level == level) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- }
-
- if (adev->asic_type == CHIP_RAVEN) {
- if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
- if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, false);
- else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, true);
- }
- }
-
- /* profile_exit setting is valid only when current mode is in profile mode */
- if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
- pr_err("Currently not in any profile mode!\n");
+ mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+ if (amdgpu_dpm_force_performance_level(adev, level)) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
+ /* override whatever a user ctx may have set */
+ adev->pm.stable_pstate_ctx = NULL;
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- if (pp_funcs->force_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.dpm.thermal_active) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- } else {
- adev->pm.dpm.forced_level = level;
- }
- mutex_unlock(&adev->pm.mutex);
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -421,7 +359,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
uint32_t i;
int buf_len, ret;
@@ -437,11 +374,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
return ret;
}
- if (pp_funcs->get_pp_num_states) {
- amdgpu_dpm_get_pp_num_states(adev, &data);
- } else {
+ if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -463,7 +397,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
@@ -479,15 +412,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
return ret;
}
- if (pp_funcs->get_current_power_state
- && pp_funcs->get_pp_num_states) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
- }
+ amdgpu_dpm_get_current_power_state(adev, &pm);
+
+ ret = amdgpu_dpm_get_pp_num_states(adev, &data);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return ret;
+
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
@@ -511,7 +445,7 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (adev->pp_force_state_enabled)
+ if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
return sysfs_emit(buf, "\n");
@@ -525,6 +459,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state = 0;
+ struct pp_states_info data;
unsigned long idx;
int ret;
@@ -533,41 +468,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (strlen(buf) == 1)
- adev->pp_force_state_enabled = false;
- else if (is_support_sw_smu(adev))
- adev->pp_force_state_enabled = false;
- else if (adev->powerplay.pp_funcs->dispatch_tasks &&
- adev->powerplay.pp_funcs->get_pp_num_states) {
- struct pp_states_info data;
-
- ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states))
- return -EINVAL;
+ adev->pm.pp_force_state_enabled = false;
- idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
+ if (strlen(buf) == 1)
+ return count;
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
+ ret = kstrtoul(buf, 0, &idx);
+ if (ret || idx >= ARRAY_SIZE(data.states))
+ return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_ENABLE_USER_STATE, &state);
- adev->pp_force_state_enabled = true;
- }
- pm_runtime_mark_last_busy(ddev->dev);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_pp_num_states(adev, &data);
+ if (ret)
+ goto err_out;
+
+ state = data.states[idx];
+
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ ret = amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_ENABLE_USER_STATE, &state);
+ if (ret)
+ goto err_out;
+
+ adev->pm.pp_force_state_enabled = true;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
+
+err_out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
}
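
The rewritten store hook funnels every failure through one err_out label so the runtime-PM reference taken by pm_runtime_get_sync() is dropped exactly once on every path, success or error. A toy model of that bracket (pm_get/pm_put are stubs standing in for the runtime-PM calls):

#include <errno.h>
#include <stdio.h>

static int pm_get(void)  { puts("get");  return 0; }
static void pm_put(void) { puts("put"); }

/* Every exit path drops the reference exactly once. */
static int set_force_state(int want_fail)
{
	int ret = pm_get();

	if (ret < 0) {
		pm_put();
		return ret;
	}
	if (want_fail) {
		ret = -EINVAL;
		goto err_out;
	}
	pm_put();
	return 0;

err_out:
	pm_put();
	return ret;
}

int main(void)
{
	int ok = set_force_state(0);
	int fail = set_force_state(1);

	printf("ok=%d fail=%d\n", ok, fail);
	return 0;
}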
/**
@@ -601,17 +544,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_pp_table) {
- size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return 0;
- }
+ size = amdgpu_dpm_get_pp_table(adev, &table);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (size <= 0)
+ return size;
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
@@ -642,15 +581,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
}
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return ret;
+
return count;
}
@@ -866,46 +803,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
- ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
- parameter,
- parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
+ type,
+ parameter,
+ parameter_size))
+ goto err_out;
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size))
+ goto err_out;
if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_READJUST_POWER_STATE,
- NULL);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL))
+ goto err_out;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
+
+err_out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
@@ -914,8 +837,17 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
+ int size = 0;
int ret;
+ enum pp_clock_type od_clocks[6] = {
+ OD_SCLK,
+ OD_MCLK,
+ OD_VDDC_CURVE,
+ OD_RANGE,
+ OD_VDDGFX_OFFSET,
+ OD_CCLK,
+ };
+ uint clk_index;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -928,16 +860,25 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->print_clock_levels) {
+ for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
+ ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
+ if (ret)
+ break;
+ }
+ if (ret == -ENOENT) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
- } else {
- size = sysfs_emit(buf, "\n");
+ if (size > 0) {
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
+ }
}
+
+ if (size == 0)
+ size = sysfs_emit(buf, "\n");
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -985,17 +926,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_ppfeature_status) {
- ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return -EINVAL;
+
return count;
}
@@ -1019,9 +957,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_ppfeature_status)
- size = amdgpu_dpm_get_ppfeature_status(adev, buf);
- else
+ size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+ if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1066,8 +1003,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
+ int size = 0;
+ int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -1080,9 +1017,11 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
+ if (ret == -ENOENT)
size = amdgpu_dpm_print_clock_levels(adev, type, buf);
- else
+
+ if (size == 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1151,10 +1090,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- else
- ret = 0;
+ ret = amdgpu_dpm_force_clock_level(adev, type, mask);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1305,10 +1241,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- value = 0;
- else if (adev->powerplay.pp_funcs->get_sclk_od)
- value = amdgpu_dpm_get_sclk_od(adev);
+ value = amdgpu_dpm_get_sclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1342,19 +1275,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- value = 0;
- } else {
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1382,10 +1303,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- value = 0;
- else if (adev->powerplay.pp_funcs->get_mclk_od)
- value = amdgpu_dpm_get_mclk_od(adev);
+ value = amdgpu_dpm_get_mclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1419,19 +1337,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- value = 0;
- } else {
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1479,9 +1385,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_power_profile_mode)
- size = amdgpu_dpm_get_power_profile_mode(adev, buf);
- else
+ size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+ if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1545,8 +1450,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_power_profile_mode)
- ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+ ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1812,9 +1716,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_gpu_metrics)
- size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
-
+ size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
goto out;
@@ -2027,8 +1929,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2053,7 +1955,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
struct device_attribute *dev_attr = &attr->dev_attr;
const char *attr_name = dev_attr->attr.name;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
enum amd_asic_type asic_type = adev->asic_type;
if (!(attr->flags & mask)) {
@@ -2076,9 +1977,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (is_support_sw_smu(adev) && adev->smu.is_apu) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2106,8 +2005,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
- if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
- amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+ if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2134,8 +2032,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
- /* setting should not be allowed from VF */
- if (amdgpu_sriov_vf(adev)) {
+ /* setting should not be allowed from VF, except in one-VF mode */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2396,17 +2294,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return sysfs_emit(buf, "%u\n", pwm_mode);
}
@@ -2434,17 +2329,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ ret = amdgpu_dpm_set_fan_control_mode(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return count;
}
@@ -2476,32 +2368,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+ if (err)
+ goto out;
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
+ err = -EINVAL;
+ goto out;
}
- if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
- err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
+out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
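Note the reordering in this function: the kstrtou32() parse now runs before pm_runtime_get_sync(), so malformed input returns immediately with nothing to unwind, and every later failure funnels through the single out: label. The reduced shape, as an illustrative helper:

/* Illustrative only: parse before taking the runtime-PM reference. */
static int example_parse_then_pm(struct device *dev, const char *buf, u32 *value)
{
        int err = kstrtou32(buf, 10, value);

        if (err)
                return err;     /* no PM reference to unwind */

        err = pm_runtime_get_sync(dev);
        if (err < 0)
                pm_runtime_put_autosuspend(dev);

        return err < 0 ? err : 0;
}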
@@ -2530,10 +2419,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
- err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2563,10 +2449,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2660,10 +2543,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2688,32 +2568,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+ if (err)
+ goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -ENODATA;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
+ err = -ENODATA;
+ goto out;
}
- if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
- err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2742,17 +2618,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -2788,16 +2661,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
return err;
}
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (err)
+ return -EINVAL;
+
return count;
}
@@ -2933,7 +2804,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
uint32_t limit;
ssize_t size;
@@ -2944,16 +2814,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if ( !(pp_funcs && pp_funcs->get_power_limit))
- return -ENODATA;
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+ r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
if (!r)
@@ -2996,10 +2863,14 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int limit_type = to_sensor_dev_attr(attr)->index;
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n",
- limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+ if (adev->asic_type == CHIP_VANGOGH)
+ return sysfs_emit(buf, "%s\n",
+ to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+ "fastPPT" : "slowPPT");
+ else
+ return sysfs_emit(buf, "PPT\n");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3008,7 +2879,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
@@ -3034,10 +2904,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
}
- if (pp_funcs && pp_funcs->set_power_limit)
- err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_power_limit(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -3315,19 +3182,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- /* there is no fan under pp one vf mode */
- if (amdgpu_sriov_is_pp_one_vf(adev) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
@@ -3374,20 +3228,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
- if (!is_support_sw_smu(adev)) {
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
- }
+ if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) &&
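Capability probing in hwmon_attributes_visible() is now done through the wrappers themselves: getters are probed with a NULL destination and setters with U32_MAX (an intentionally out-of-range speed), on the assumption that a wrapper without a backing callback returns -EOPNOTSUPP before touching the argument, while a capable backend rejects the sentinel with some other error. A hypothetical helper condensing the pattern:

/* Hypothetical condensation of the probe pattern above. */
static bool fan_pwm_supported(struct amdgpu_device *adev)
{
        return amdgpu_dpm_get_fan_speed_pwm(adev, NULL) != -EOPNOTSUPP &&
               amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) != -EOPNOTSUPP;
}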
@@ -3404,22 +3256,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0;
- if (!is_support_sw_smu(adev)) {
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
- !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
+ /* hide max/min values if we can't both query and manage the fan */
+ if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
- }
+ if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
@@ -3462,8 +3312,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
@@ -3549,14 +3398,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
uint16_t *p_val;
uint32_t size;
int i;
+ uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
- if (is_support_cclk_dpm(adev)) {
- p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+ if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
+ p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
GFP_KERNEL);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
(void *)p_val, &size)) {
- for (i = 0; i < adev->smu.cpu_core_num; i++)
+ for (i = 0; i < num_cpu_cores; i++)
seq_printf(m, "\t%u MHz (CPU%d)\n",
*(p_val + i), i);
}
@@ -3684,27 +3534,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
return r;
}
- if (!adev->pm.dpm_enabled) {
- seq_printf(m, "dpm not enabled\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- return 0;
- }
-
- if (!is_support_sw_smu(adev) &&
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
- else
- seq_printf(m, "Debugfs support not implemented for this asic\n");
- mutex_unlock(&adev->pm.mutex);
- r = 0;
- } else {
+ if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
+ if (r)
+ goto out;
}
- if (r)
- goto out;
amdgpu_device_ip_get_clockgating_state(adev, &flags);
@@ -3730,21 +3564,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
size_t smu_prv_buf_size;
void *smu_prv_buf;
+ int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
- pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
- &smu_prv_buf_size);
- else
- return -ENOSYS;
+ ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
+ if (ret)
+ return ret;
if (!smu_prv_buf || !smu_prv_buf_size)
return -EINVAL;
@@ -3768,6 +3599,9 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
+ if (!adev->pm.dpm_enabled)
+ return;
+
debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
&amdgpu_debugfs_pm_info_fops);
@@ -3777,6 +3611,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
&amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size);
- amdgpu_smu_stb_debug_fs_init(adev);
+ amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index c464a045000d..ddfa55b59d02 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -23,6 +23,12 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+/* Argument for PPSMC_MSG_GpuChangeState */
+enum gfx_change_state {
+ sGpuChangeState_D0Entry = 1,
+ sGpuChangeState_D3Entry,
+};
+
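These values feed amdgpu_dpm_gfx_state_change(), declared later in this header, which passes the D0/D3 transition hint through to the SMU. An illustrative call from a runtime-suspend path:

/* Illustrative: tell the SMU the GPU is about to enter D3. */
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);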
enum amdgpu_int_thermal_type {
THERMAL_TYPE_NONE,
THERMAL_TYPE_EXTERNAL,
@@ -39,19 +45,6 @@ enum amdgpu_int_thermal_type {
THERMAL_TYPE_KV,
};
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -95,19 +88,6 @@ struct amdgpu_dpm_thermal {
struct amdgpu_irq_src irq;
};
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
struct amdgpu_clock_and_voltage_limits {
u32 sclk;
u32 mclk;
@@ -246,128 +226,6 @@ struct amdgpu_dpm_fan {
bool ucode_fan_control;
};
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_post_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_display_configuration_changed(adev) \
- ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_print_power_state(adev, ps) \
- ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
-
-#define amdgpu_dpm_vblank_too_short(adev) \
- ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_enable_bapm(adev, e) \
- ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
- ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
-
-#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
- ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
- (adev)->powerplay.pp_handle, buf))
-
-#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
- (adev)->powerplay.pp_handle, parameter, size))
-
-#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
- (adev)->powerplay.pp_handle, (buf)))
-
-#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
- ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
- (adev)->powerplay.pp_handle, (ppfeatures)))
-
-#define amdgpu_dpm_get_gpu_metrics(adev, table) \
- ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
-
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -426,6 +284,27 @@ enum ip_power_state {
/* Used to mask smu debug modes */
#define SMU_DEBUG_HALT_ON_ERROR 0x1
+#define MAX_SMU_I2C_BUSES 2
+
+struct amdgpu_smu_i2c_bus {
+ struct i2c_adapter adapter;
+ struct amdgpu_device *adev;
+ int port;
+ struct mutex mutex;
+};
+
+struct config_table_setting
+{
+ uint16_t gfxclk_average_tau;
+ uint16_t socclk_average_tau;
+ uint16_t uclk_average_tau;
+ uint16_t gfx_activity_average_tau;
+ uint16_t mem_activity_average_tau;
+ uint16_t socket_power_average_tau;
+ uint16_t apu_socket_power_average_tau;
+ uint16_t fclk_average_tau;
+};
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -458,8 +337,9 @@ struct amdgpu_pm {
uint32_t pp_feature;
/* Used for I2C access to various EEPROMs on relevant ASICs */
- struct i2c_adapter smu_i2c;
- struct mutex smu_i2c_mutex;
+ struct amdgpu_smu_i2c_bus smu_i2c[MAX_SMU_I2C_BUSES];
+ struct i2c_adapter *ras_eeprom_i2c_bus;
+ struct i2c_adapter *fru_eeprom_i2c_bus;
struct list_head pm_attr_list;
atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
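The single smu_i2c adapter grows into a small array of per-port buses, each with its own mutex, and the new ras_eeprom_i2c_bus/fru_eeprom_i2c_bus pointers record which adapter each EEPROM actually sits behind. A hedged access sketch, assuming SMU init has filled the pointers in and using a hypothetical 0x50 device address:

/* Hedged sketch: read one byte over the bus registered for RAS EEPROM. */
static int example_ras_eeprom_read_byte(struct amdgpu_device *adev, u8 *out)
{
        struct i2c_adapter *adap = adev->pm.ras_eeprom_i2c_bus;
        struct i2c_msg msg = {
                .addr = 0x50,           /* hypothetical EEPROM address */
                .flags = I2C_M_RD,
                .len = 1,
                .buf = out,
        };

        if (!adap)
                return -ENODEV;

        return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
}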
@@ -468,64 +348,18 @@ struct amdgpu_pm {
* 0 = disabled (default), otherwise enable corresponding debug mode
*/
uint32_t smu_debug_mask;
-};
-
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-enum amdgpu_td {
- AMDGPU_TD_AUTO,
- AMDGPU_TD_UP,
- AMDGPU_TD_DOWN,
-};
+ bool pp_force_state_enabled;
-enum amdgpu_display_watermark {
- AMDGPU_DISPLAY_WATERMARK_LOW = 0,
- AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
+ struct mutex stable_pstate_ctx_lock;
+ struct amdgpu_ctx *stable_pstate_ctx;
-enum amdgpu_display_gap
-{
- AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
- AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
- AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
+ struct config_table_setting config_table;
};
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx);
-
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate);
@@ -571,16 +405,139 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t *min,
+ uint32_t *max);
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t min,
+ uint32_t max);
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+ uint64_t event_arg);
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+ enum gfx_change_state state);
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+ void *umc_ecc);
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+ uint32_t idx);
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type state);
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amd_dpm_forced_level level);
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+ struct pp_states_info *states);
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table);
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size);
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size);
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf);
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf,
+ int *offset);
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+ uint64_t ppfeature_masks);
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t mask);
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+ char *buf);
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+ long *input, uint32_t size);
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t *fan_mode);
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t speed);
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t *speed);
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t *speed);
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t speed);
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t mode);
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+ uint32_t *limit,
+ enum pp_power_limit_level pp_limit_level,
+ enum pp_power_type power_type);
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit);
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m);
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ void **addr,
+ size_t *size);
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+ const char *buf,
+ size_t size);
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev);
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev);
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+ const struct amd_pp_display_configuration *input);
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+ struct amd_pp_simple_clock_info *clocks);
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks);
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+ void *clock_ranges);
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+ struct pp_display_clock_request *clock);
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+ struct amd_pp_clock_info *clocks);
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev);
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+ uint32_t count);
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+ uint32_t clock);
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock);
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock);
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+ bool disable_memory_clock_switch);
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+ struct pp_smu_nv_clock_table *max_clocks);
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+ struct dpm_clocks *clock_table);
#endif
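With the macro layer above gone, power clients call these plain wrappers directly. A hedged usage example (not part of the patch) that clamps the SCLK soft range to the bounds the backend reports:

/* Illustrative caller of the new wrapper API. */
uint32_t min_clk, max_clk;
int err;

err = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_clk, &max_clk);
if (!err)
        err = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min_clk, max_clk);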
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
new file mode 100644
index 000000000000..5c2a89f0d5d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DPM_INTERNAL_H__
+#define __AMDGPU_DPM_INTERNAL_H__
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
new file mode 100644
index 000000000000..baa4265d1daa
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2021 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+AMD_LEGACYDPM_PATH = ../pm/legacy-dpm
+
+LEGACYDPM_MGR-y = legacy_dpm.o
+
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
+
+AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR-y))
+
+AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
index 2fcc4b60153c..2fcc4b60153c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index bcae42cef374..8b23cc9f098a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -36,6 +36,7 @@
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
+#include "legacy_dpm.h"
#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
#define KV_MINIMUM_ENGINE_CLOCK 800
@@ -1256,6 +1257,19 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
}
}
+static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
+{
+ switch (sensor) {
+ case THERMAL_TYPE_KV:
+ return true;
+ case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_EXTERNAL:
+ case THERMAL_TYPE_EXTERNAL_GPIO:
+ default:
+ return false;
+ }
+}
+
static int kv_dpm_enable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
@@ -1352,7 +1366,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
}
if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+ kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
@@ -3016,21 +3030,18 @@ static int kv_dpm_sw_init(void *handle)
return 0;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
ret = kv_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
@@ -3041,9 +3052,7 @@ static int kv_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
- mutex_lock(&adev->pm.mutex);
kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
return 0;
}
@@ -3056,15 +3065,13 @@ static int kv_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
- mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
@@ -3072,11 +3079,8 @@ static int kv_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
return 0;
}
@@ -3086,12 +3090,10 @@ static int kv_dpm_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
}
return 0;
}
@@ -3103,16 +3105,14 @@ static int kv_dpm_resume(void *handle)
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
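The kv hunks drop every explicit adev->pm.mutex lock/unlock pair around init, enable, suspend and resume. The working assumption of this refactor is that serialization moves behind the amdgpu_dpm_* entry points, roughly along these lines (the pp_funcs->example hook is hypothetical):

/* Hedged sketch: locking centralized in a dpm wrapper. */
static int amdgpu_dpm_example(struct amdgpu_device *adev)
{
        int ret;

        mutex_lock(&adev->pm.mutex);
        ret = adev->powerplay.pp_funcs->example(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}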
@@ -3366,6 +3366,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
.read_sensor = &kv_dpm_read_sensor,
+ .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h
index 6df0ed41317c..6df0ed41317c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c
index 2d9ab6b8be66..2d9ab6b8be66 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
new file mode 100644
index 000000000000..9613c6181c17
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -0,0 +1,1081 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "amdgpu_atombios.h"
+#include "atom.h"
+#include "amd_pcie.h"
+#include "legacy_dpm.h"
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
+
+#define amdgpu_dpm_pre_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+ ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_print_power_state(adev, ps) \
+ ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+ ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+{
+ const char *s;
+
+ switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ default:
+ s = "none";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ s = "battery";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ s = "balanced";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ s = "performance";
+ break;
+ }
+ printk("\tui class: %s\n", s);
+ printk("\tinternal class:");
+ if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
+ (class2 == 0))
+ pr_cont(" none");
+ else {
+ if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ pr_cont(" boot");
+ if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ pr_cont(" thermal");
+ if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+ pr_cont(" limited_pwr");
+ if (class & ATOM_PPLIB_CLASSIFICATION_REST)
+ pr_cont(" rest");
+ if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
+ pr_cont(" forced");
+ if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ pr_cont(" 3d_perf");
+ if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
+ pr_cont(" ovrdrv");
+ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ pr_cont(" uvd");
+ if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
+ pr_cont(" 3d_low");
+ if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ pr_cont(" acpi");
+ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ pr_cont(" uvd_hd2");
+ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ pr_cont(" uvd_hd");
+ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ pr_cont(" uvd_sd");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+ pr_cont(" limited_pwr2");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ pr_cont(" ulv");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ pr_cont(" uvd_mvc");
+ }
+ pr_cont("\n");
+}
+
+void amdgpu_dpm_print_cap_info(u32 caps)
+{
+ printk("\tcaps:");
+ if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ pr_cont(" single_disp");
+ if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
+ pr_cont(" video");
+ if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
+ pr_cont(" no_dc");
+ pr_cont("\n");
+}
+
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ printk("\tstatus:");
+ if (rps == adev->pm.dpm.current_ps)
+ pr_cont(" c");
+ if (rps == adev->pm.dpm.requested_ps)
+ pr_cont(" r");
+ if (rps == adev->pm.dpm.boot_ps)
+ pr_cont(" b");
+ pr_cont("\n");
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
+ return;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+int amdgpu_get_platform_caps(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+ return 0;
+}
+
+union fan_info {
+ struct _ATOM_PPLIB_FANTABLE fan;
+ struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
+ ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
+{
+ u32 size = atom_table->ucNumEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ int i;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
+
+ amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
+ if (!amdgpu_table->entries)
+ return -ENOMEM;
+
+ entry = &atom_table->entries[0];
+ for (i = 0; i < atom_table->ucNumEntries; i++) {
+ amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+ (entry->ucClockHigh << 16);
+ amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
+ }
+ amdgpu_table->count = atom_table->ucNumEntries;
+
+ return 0;
+}
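Two details of the parser above: each 24-bit clock arrives split into a 16-bit little-endian low word plus an 8-bit high byte, and the walk advances by explicit byte arithmetic through the packed BIOS blob. For example, usClockLow=0x86A0 with ucClockHigh=0x01 reassembles to 0x186A0 = 100000. Stripped to its core (same types as above):

/* Illustrative record walk; reassembly: low | (high << 16). */
entry = &atom_table->entries[0];
for (i = 0; i < atom_table->ucNumEntries; i++) {
        amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                                       (entry->ucClockHigh << 16);
        amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
        entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                ((u8 *)entry + sizeof(*entry));
}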
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
+
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ union power_info *power_info;
+ union fan_info *fan_info;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int ret, i;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* fan table */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+ if (power_info->pplib3.usFanTableOffset) {
+ fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib3.usFanTableOffset));
+ adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
+ adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
+ adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
+ adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
+ adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
+ adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
+ adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
+ if (fan_info->fan.ucFanTableFormat >= 2)
+ adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
+ else
+ adev->pm.dpm.fan.t_max = 10900;
+ adev->pm.dpm.fan.cycle_delay = 100000;
+ if (fan_info->fan.ucFanTableFormat >= 3) {
+ adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+ adev->pm.dpm.fan.default_max_fan_pwm =
+ le16_to_cpu(fan_info->fan3.usFanPWMMax);
+ adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+ adev->pm.dpm.fan.fan_output_sensitivity =
+ le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+ }
+ adev->pm.dpm.fan.ucode_fan_control = true;
+ }
+ }
+
+ /* clock dependency tables, shedding tables */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
+ if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+ ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+ (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
+ if (clk_v->ucNumEntries) {
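+ /* sclk/mclk are packed as a 16-bit low word plus an 8-bit high byte */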
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
+ le16_to_cpu(clk_v->entries[0].usSclkLow) |
+ (clk_v->entries[0].ucSclkHigh << 16);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
+ le16_to_cpu(clk_v->entries[0].usMclkLow) |
+ (clk_v->entries[0].ucMclkHigh << 16);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
+ le16_to_cpu(clk_v->entries[0].usVddc);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
+ le16_to_cpu(clk_v->entries[0].usVddci);
+ }
+ }
+ if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
+ ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
+ (ATOM_PPLIB_PhaseSheddingLimits_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
+
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
+ kcalloc(psl->ucNumEntries,
+ sizeof(struct amdgpu_phase_shedding_limits_entry),
+ GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+
+ entry = &psl->entries[0];
+ for (i = 0; i < psl->ucNumEntries; i++) {
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
+ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
+ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
+ }
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
+ psl->ucNumEntries;
+ }
+ }
+
+ /* cac data */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
+ adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
+ adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
+ adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
+ adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
+ if (adev->pm.dpm.tdp_od_limit)
+ adev->pm.dpm.power_control = true;
+ else
+ adev->pm.dpm.power_control = false;
+ adev->pm.dpm.tdp_adjustment = 0;
+ adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
+ adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
+ adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
+ if (power_info->pplib5.usCACLeakageTableOffset) {
+ ATOM_PPLIB_CAC_Leakage_Table *cac_table =
+ (ATOM_PPLIB_CAC_Leakage_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
+ u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ entry = &cac_table->entries[0];
+ for (i = 0; i < cac_table->ucNumEntries; i++) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+ le16_to_cpu(entry->usVddc1);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+ le16_to_cpu(entry->usVddc2);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+ le16_to_cpu(entry->usVddc3);
+ } else {
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+ le16_to_cpu(entry->usVddc);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+ le32_to_cpu(entry->ulLeakageValue);
+ }
+ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
+ }
+ adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
+ }
+ }
+
+ /* ext tables */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+ ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+ ext_hdr->usVCETableOffset) {
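+ /* the +1 offsets skip single-byte fields (table revision, then the array's entry count) that precede each sub-table */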
+ VCEClockInfoArray *array = (VCEClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_State_Table *states =
+ (ATOM_PPLIB_VCE_State_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
+ 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ ATOM_PPLIB_VCE_State_Record *state_entry;
+ VCEClockInfo *vce_clk;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ state_entry = &states->entries[0];
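+ /* each limit record picks its clock pair out of the VCEClockInfo array via ucVCEClockInfoIndex */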
+ for (i = 0; i < limits->numEntries; i++) {
+ vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+ }
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
+ vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ adev->pm.dpm.vce_states[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ adev->pm.dpm.vce_states[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ adev->pm.dpm.vce_states[i].clk_idx =
+ state_entry->ucClockInfoIndex & 0x3f;
+ adev->pm.dpm.vce_states[i].pstate =
+ (state_entry->ucClockInfoIndex & 0xc0) >> 6;
+ state_entry = (ATOM_PPLIB_VCE_State_Record *)
+ ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+ ext_hdr->usUVDTableOffset) {
+ UVDClockInfoArray *array = (UVDClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ UVDClockInfo *uvd_clk = (UVDClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+ ext_hdr->usSAMUTableOffset) {
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
+ ext_hdr->usPPMTableOffset) {
+ ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPPMTableOffset));
+ adev->pm.dpm.dyn_state.ppm_table =
+ kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.ppm_table) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+ adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+ le16_to_cpu(ppm->usCpuCoreNumber);
+ adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
+ le32_to_cpu(ppm->ulPlatformTDP);
+ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
+ le32_to_cpu(ppm->ulSmallACPlatformTDP);
+ adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
+ le32_to_cpu(ppm->ulPlatformTDC);
+ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
+ le32_to_cpu(ppm->ulSmallACPlatformTDC);
+ adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
+ le32_to_cpu(ppm->ulApuTDP);
+ adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
+ le32_to_cpu(ppm->ulDGpuTDP);
+ adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
+ le32_to_cpu(ppm->ulDGpuUlvPower);
+ adev->pm.dpm.dyn_state.ppm_table->tj_max =
+ le32_to_cpu(ppm->ulTjmax);
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+ ext_hdr->usACPTableOffset) {
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+ ext_hdr->usPowerTuneTableOffset) {
+ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ ATOM_PowerTune_Table *pt;
+ adev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+ ppt->usMaximumPowerDeliveryLimit;
+ pt = &ppt->power_tune_table;
+ } else {
+ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+ pt = &ppt->power_tune_table;
+ }
+ adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+ adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+ le16_to_cpu(pt->usConfigurableTDP);
+ adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+ adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+ le16_to_cpu(pt->usBatteryPowerLimit);
+ adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+ le16_to_cpu(pt->usSmallPowerLimit);
+ adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+ le16_to_cpu(pt->usLowCACLeakage);
+ adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+ le16_to_cpu(pt->usHighCACLeakage);
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
+ ext_hdr->usSclkVddgfxTableOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(
+ &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+ dep_table);
+ if (ret) {
+ kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
+
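+ /* kfree(NULL) is a no-op, so this is safe to call on a partially parsed table */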
+ kfree(dyn_state->vddc_dependency_on_sclk.entries);
+ kfree(dyn_state->vddci_dependency_on_mclk.entries);
+ kfree(dyn_state->vddc_dependency_on_mclk.entries);
+ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+ kfree(dyn_state->cac_leakage_table.entries);
+ kfree(dyn_state->phase_shedding_limits_table.entries);
+ kfree(dyn_state->ppm_table);
+ kfree(dyn_state->cac_tdp_table);
+ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
+}
+
+static const char *pp_lib_thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "RV6xx",
+ "RV770",
+ "adt7473",
+ "NONE",
+ "External GPIO",
+ "Evergreen",
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
+ "Southern Islands",
+ "lm96163",
+ "Sea Islands",
+ "Kaveri/Kabini",
+};
+
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ ATOM_PPLIB_POWERPLAYTABLE *power_table;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ ATOM_PPLIB_THERMALCONTROLLER *controller;
+ struct amdgpu_i2c_bus_rec i2c_bus;
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return;
+ power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
+ (mode_info->atom_context->bios + data_offset);
+ controller = &power_table->sThermalController;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+ adev->pm.no_fan = true;
+ adev->pm.fan_pulses_per_revolution =
+ controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ if (adev->pm.fan_pulses_per_revolution) {
+ adev->pm.fan_min_rpm = controller->ucFanMinRPM;
+ adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+ }
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_SI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_CI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_KV;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+ } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+ i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
+ adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
+ if (adev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
+ }
+ } else {
+ DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ controller->ucType,
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ }
+ }
+}
+
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
+
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type dpm_state)
+{
+ int i;
+ struct amdgpu_ps *ps;
+ u32 ui_class;
+ bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
+
+ /* check if the vblank period is too short to adjust the mclk */
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+ if (amdgpu_dpm_vblank_too_short(adev))
+ single_display = false;
+ }
+
+ /* certain older asics have a separate 3D performance state,
+ * so try that first if the user selected performance
+ */
+ if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+ /* balanced states don't exist at the moment */
+ if (dpm_state == POWER_STATE_TYPE_BALANCED)
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+ /* Pick the best power state based on current conditions */
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ ps = &adev->pm.dpm.ps[i];
+ ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+ switch (dpm_state) {
+ /* user states */
+ case POWER_STATE_TYPE_BATTERY:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ /* internal states */
+ case POWER_STATE_TYPE_INTERNAL_UVD:
+ if (adev->pm.dpm.uvd_ps)
+ return adev->pm.dpm.uvd_ps;
+ else
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_BOOT:
+ return adev->pm.dpm.boot_ps;
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ULV:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ return ps;
+ break;
+ default:
+ break;
+ }
+ }
+ /* use a fallback state if we didn't match */
+ switch (dpm_state) {
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (adev->pm.dpm.uvd_ps) {
+ return adev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ dpm_state = POWER_STATE_TYPE_BATTERY;
+ goto restart_search;
+ case POWER_STATE_TYPE_BATTERY:
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ps *ps;
+ enum amd_pm_state_type dpm_state;
+ int ret;
+ bool equal = false;
+
+ /* if dpm init failed */
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+ /* add other state override checks here */
+ if ((!adev->pm.dpm.thermal_active) &&
+ (!adev->pm.dpm.uvd_active))
+ adev->pm.dpm.state = adev->pm.dpm.user_state;
+ }
+ dpm_state = adev->pm.dpm.state;
+
+ ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+ if (ps)
+ adev->pm.dpm.requested_ps = ps;
+ else
+ return -EINVAL;
+
+ if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
+ printk("switching from power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+ }
+
+ /* update whether vce is active */
+ ps->vce_active = adev->pm.dpm.vce_active;
+ if (pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ ret = amdgpu_dpm_pre_set_power_state(adev);
+ if (ret)
+ return ret;
+
+ if (pp_funcs->check_state_equal) {
+ if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
+
+ if (equal)
+ return 0;
+
+ if (pp_funcs->set_power_state)
+ pp_funcs->set_power_state(adev->powerplay.pp_handle);
+
+ amdgpu_dpm_post_set_power_state(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+ if (pp_funcs->force_performance_level) {
+ if (adev->pm.dpm.thermal_active) {
+ enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+ /* force low perf level for thermal */
+ pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+ /* save the user's level */
+ adev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
+ }
+ }
+
+ return 0;
+}
+
+void amdgpu_legacy_dpm_compute_clocks(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i = 0;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
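+ /* wait for the rings to idle so the reclock does not race in-flight work */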
+
+ amdgpu_dpm_get_active_displays(adev);
+
+ amdgpu_dpm_change_power_state_locked(adev);
+}
+
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device,
+ pm.dpm.thermal.work);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ /* switch to the thermal state */
+ enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ int temp, size = sizeof(temp);
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp,
+ &size)) {
+ if (temp < adev->pm.dpm.thermal.min_temp)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ } else {
+ if (adev->pm.dpm.thermal.high_to_low)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ }
+
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ adev->pm.dpm.thermal_active = true;
+ else
+ adev->pm.dpm.thermal_active = false;
+
+ adev->pm.dpm.state = dpm_state;
+
+ amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
new file mode 100644
index 000000000000..93bd3973330c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __LEGACY_DPM_H__
+#define __LEGACY_DPM_H__
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2);
+void amdgpu_dpm_print_cap_info(u32 caps);
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps);
+int amdgpu_get_platform_caps(struct amdgpu_device *adev);
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx);
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+void amdgpu_legacy_dpm_compute_clocks(void *handle);
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
+#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h
index 8463245f424f..8463245f424f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
index 055321f61ca7..055321f61ca7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 81f82aa05ec2..caae54487f9c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
@@ -37,6 +38,7 @@
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
+#include "legacy_dpm.h"
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -96,6 +98,19 @@ union pplib_clock_info {
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
+enum si_dpm_auto_throttle_src {
+ SI_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum si_dpm_event_src {
+ SI_DPM_EVENT_SRC_ANALOG = 0,
+ SI_DPM_EVENT_SRC_EXTERNAL = 1,
+ SI_DPM_EVENT_SRC_DIGITAL = 2,
+ SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
R600_UTC_DFLT_00,
@@ -3718,25 +3733,25 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
+ enum si_dpm_event_src dpm_event_src;
switch (sources) {
case 0:
default:
want_thermal_protection = false;
break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
break;
}
@@ -3750,7 +3765,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
+ enum si_dpm_auto_throttle_src source,
bool enable)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -3877,6 +3892,40 @@ static int si_set_boot_state(struct amdgpu_device *adev)
}
#endif
+static int si_set_powergating_by_smu(void *handle,
+ uint32_t block_type,
+ bool gate)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
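+ /* legacy dpm has no fine-grained gating: toggling UVD/VCE just updates the active flags and recomputes clocks */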
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ if (!gate) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+
+ amdgpu_legacy_dpm_compute_clocks(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ if (!gate) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+
+ amdgpu_legacy_dpm_compute_clocks(handle);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int si_set_sw_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -4927,6 +4976,31 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
return 0;
}
+static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev,
+ u32 sys_mask,
+ enum si_pcie_gen asic_gen,
+ enum si_pcie_gen default_gen)
+{
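+ /* honor an explicit ASIC cap; otherwise pick the fastest gen both the system mask and the default allow */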
+ switch (asic_gen) {
+ case SI_PCIE_GEN1:
+ return SI_PCIE_GEN1;
+ case SI_PCIE_GEN2:
+ return SI_PCIE_GEN2;
+ case SI_PCIE_GEN3:
+ return SI_PCIE_GEN3;
+ default:
+ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+ (default_gen == SI_PCIE_GEN3))
+ return SI_PCIE_GEN3;
+ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+ (default_gen == SI_PCIE_GEN2))
+ return SI_PCIE_GEN2;
+ else
+ return SI_PCIE_GEN1;
+ }
+ return SI_PCIE_GEN1;
+}
+
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
SISLANDS_SMC_STATETABLE *table)
{
@@ -4989,10 +5063,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
&table->ACPIState.level.std_vddc);
}
table->ACPIState.level.gen2PCIE =
- (u8)amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
+ (u8)si_gen_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ SI_PCIE_GEN1);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -5430,7 +5504,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
bool gmc_pg = false;
if (eg_pi->pcie_performance_request &&
- (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+ (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID))
level->gen2PCIE = (u8)si_pi->force_pcie_gen;
else
level->gen2PCIE = (u8)pl->pcie_gen;
@@ -6147,8 +6221,8 @@ static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
+static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
{
struct si_ps *state = si_get_ps(amdgpu_state);
int i;
@@ -6177,27 +6251,27 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
+ enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum si_pcie_gen current_link_speed;
- if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+ if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID)
current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
else
current_link_speed = si_pi->force_pcie_gen;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->pspp_notify_required = false;
if (target_link_speed > current_link_speed) {
switch (target_link_speed) {
#if defined(CONFIG_ACPI)
- case AMDGPU_PCIE_GEN3:
+ case SI_PCIE_GEN3:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
break;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
+ si_pi->force_pcie_gen = SI_PCIE_GEN2;
+ if (current_link_speed == SI_PCIE_GEN2)
break;
fallthrough;
- case AMDGPU_PCIE_GEN2:
+ case SI_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
fallthrough;
@@ -6217,13 +6291,13 @@ static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
u8 request;
if (si_pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
+ if (target_link_speed == SI_PCIE_GEN3)
request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
+ else if (target_link_speed == SI_PCIE_GEN2)
request = PCIE_PERF_REQ_PECI_GEN2;
else
request = PCIE_PERF_REQ_PECI_GEN1;
@@ -6546,6 +6620,9 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
u64 tmp64;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!speed)
+ return -EINVAL;
+
if (adev->pm.no_fan)
return -ENOENT;
@@ -6596,10 +6673,13 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
return 0;
}
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
+static int si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (mode == U32_MAX)
+ return -EINVAL;
+
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6612,19 +6692,26 @@ static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
else
si_fan_ctrl_set_default_mode(adev);
}
+
+ return 0;
}
-static u32 si_dpm_get_fan_control_mode(void *handle)
+static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
+ if (!fan_mode)
+ return -EINVAL;
+
if (si_pi->fan_is_controlled_by_smc)
return 0;
tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- return (tmp >> FDO_PWM_MODE_SHIFT);
+ *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+
+ return 0;
}
#if 0
@@ -6864,7 +6951,7 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_sclk_control(adev, true);
si_start_dpm(adev);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
ni_update_current_ps(adev, boot_ps);
@@ -6904,7 +6991,7 @@ static void si_dpm_disable(struct amdgpu_device *adev)
si_enable_power_containment(adev, boot_ps, false);
si_enable_smc_cac(adev, boot_ps, false);
si_enable_spread_spectrum(adev, false);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
si_stop_dpm(adev);
si_reset_to_default(adev);
si_dpm_stop_smc(adev);
@@ -6946,10 +7033,7 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
ret = si_resume_smc(adev);
if (ret)
return ret;
- ret = si_set_sw_state(adev);
- if (ret)
- return ret;
- return 0;
+ return si_set_sw_state(adev);
}
static void si_set_vce_clock(struct amdgpu_device *adev,
@@ -7148,10 +7232,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
+ pl->pcie_gen = si_gen_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7318,7 +7402,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
si_pi->sys_pcie_mask =
adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
si_set_max_cu_value(adev);
@@ -7713,21 +7797,18 @@ static int si_dpm_sw_init(void *handle)
return ret;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
ret = si_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
@@ -7738,9 +7819,7 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
- mutex_lock(&adev->pm.mutex);
si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
return 0;
}
@@ -7754,15 +7833,13 @@ static int si_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
- mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
@@ -7770,11 +7847,8 @@ static int si_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
return 0;
}
@@ -7784,12 +7858,10 @@ static int si_dpm_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
}
return 0;
}
@@ -7801,16 +7873,14 @@ static int si_dpm_resume(void *handle)
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
@@ -8055,6 +8125,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.print_power_state = &si_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
+ .set_powergating_by_smu = &si_set_powergating_by_smu,
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
@@ -8063,6 +8134,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.check_state_equal = &si_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.read_sensor = &si_dpm_read_sensor,
+ .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index bc0be6818e21..11cb7874a6bb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
@@ -595,13 +595,20 @@ struct rv7xx_power_info {
RV770_SMC_STATETABLE smc_statetable;
};
+enum si_pcie_gen {
+ SI_PCIE_GEN1 = 0,
+ SI_PCIE_GEN2 = 1,
+ SI_PCIE_GEN3 = 2,
+ SI_PCIE_GEN_INVALID = 0xffff
+};
+
struct rv7xx_pl {
u32 sclk;
u32 mclk;
u16 vddc;
u16 vddci; /* eg+ only */
u32 flags;
- enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+ enum si_pcie_gen pcie_gen; /* si+ only */
};
struct rv7xx_ps {
@@ -967,9 +974,9 @@ struct si_power_info {
struct si_ulv_param ulv;
u32 max_cu;
/* pcie gen */
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen boot_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
+ enum si_pcie_gen force_pcie_gen;
+ enum si_pcie_gen boot_pcie_gen;
+ enum si_pcie_gen acpi_pcie_gen;
u32 sys_pcie_mask;
/* flags */
bool enable_dte;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..8f994ffa9cd1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
index c7dc117a688c..c7dc117a688c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile
index 0fb114adc79f..795a3624cbbf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile
@@ -30,10 +30,6 @@ include $(AMD_POWERPLAY)
POWER_MGR-y = amd_powerplay.o
-POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
-
-POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
-
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y))
AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 3ab67b232cd4..a2da46bf3985 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -31,7 +31,8 @@
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
-
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -49,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
- mutex_init(&hwmgr->smu_lock);
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -177,12 +177,9 @@ static int pp_late_init(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- if (hwmgr && hwmgr->pm_en) {
- mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr && hwmgr->pm_en)
hwmgr_handle_task(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
- mutex_unlock(&hwmgr->smu_lock);
- }
if (adev->pm.smu_prv_buffer_size != 0)
pp_reserve_vram_for_smu(adev);
@@ -322,12 +319,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- amdgpu_device_ip_set_powergating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -335,12 +326,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = hwmgr->saved_dpm_level;
hwmgr->en_umd_pstate = false;
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
}
}
}
@@ -356,11 +341,9 @@ static int pp_dpm_force_performance_level(void *handle,
if (level == hwmgr->dpm_level)
return 0;
- mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -369,21 +352,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- enum amd_dpm_forced_level level;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- level = hwmgr->dpm_level;
- mutex_unlock(&hwmgr->smu_lock);
- return level;
+ return hwmgr->dpm_level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -392,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -410,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
@@ -427,9 +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
@@ -443,25 +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
- int ret = 0;
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr_handle_task(hwmgr, task_id, user_state);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr_handle_task(hwmgr, task_id, user_state);
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
@@ -473,8 +435,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
state = hwmgr->current_ps;
switch (state->classification.ui_label) {
@@ -494,115 +454,107 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&hwmgr->smu_lock);
return pm_type;
}
-static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
- return;
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
+ return -EOPNOTSUPP;
+
+ if (mode == U32_MAX)
+ return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return;
- }
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
- mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
-static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t mode = 0;
if (!hwmgr || !hwmgr->pm_en)
- return 0;
+ return -EOPNOTSUPP;
- if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return mode;
+ if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
+ return -EOPNOTSUPP;
+
+ if (!fan_mode)
+ return -EINVAL;
+
+ *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+ return 0;
}
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
+ return -EOPNOTSUPP;
+
+ if (speed == U32_MAX)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
- return -EINVAL;
+ return -EOPNOTSUPP;
- if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
+ if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
+ return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ if (!speed)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EOPNOTSUPP;
+
+ if (!rpm)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
+ return -EOPNOTSUPP;
+
+ if (rpm == U32_MAX)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_get_pp_num_states(void *handle,
@@ -616,8 +568,6 @@ static int pp_dpm_get_pp_num_states(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
data->nums = hwmgr->num_ps;
for (i = 0; i < hwmgr->num_ps; i++) {
@@ -640,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void *handle,
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- int size = 0;
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
- size = hwmgr->soft_pp_table_size;
- mutex_unlock(&hwmgr->smu_lock);
- return size;
+ return hwmgr->soft_pp_table_size;
}
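/*
 * pp_dpm_get_pp_table keeps its old calling convention -- the table
 * pointer comes back through **table and the return value is the size
 * in bytes (or a negative errno) -- it just no longer needs a lock or a
 * local variable to do so.  Sketch of how a caller consumes that
 * convention (hypothetical names):
 */
#include <errno.h>
#include <stdio.h>

static char soft_pp_table[] = "pptable-bytes";

static int get_pp_table(char **table)
{
	if (!table)
		return -EINVAL;
	*table = soft_pp_table;
	return (int)sizeof(soft_pp_table);	/* size doubles as status */
}

int main(void)
{
	char *tbl;
	int size = get_pp_table(&tbl);

	if (size < 0)
		return 1;
	printf("got %d bytes, first byte '%c'\n", size, tbl[0]);
	return 0;
}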
static int amd_powerplay_reset(void *handle)
@@ -683,13 +628,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
- goto err;
+ return ret;
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
@@ -698,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
ret = amd_powerplay_reset(handle);
if (ret)
- goto err;
+ return ret;
- if (hwmgr->hwmgr_func->avfs_control) {
+ if (hwmgr->hwmgr_func->avfs_control)
ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
- if (ret)
- goto err;
- }
- mutex_unlock(&hwmgr->smu_lock);
- return 0;
-err:
- mutex_unlock(&hwmgr->smu_lock);
+
return ret;
}
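/*
 * pp_dpm_set_pp_table lazily duplicates the read-only soft table the
 * first time a caller overrides it (kmemdup), then copies the caller's
 * buffer over that private copy; with the lock gone, the goto-based
 * error paths collapse into plain early returns.  Userspace sketch of
 * the copy-on-first-write pattern (illustrative names):
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct pp_tables {
	const unsigned char *soft;	/* firmware-provided default */
	size_t soft_size;
	unsigned char *hardcode;	/* private override, NULL until used */
};

static int set_table(struct pp_tables *t, const void *buf, size_t size)
{
	if (size > t->soft_size)
		return -EINVAL;
	if (!t->hardcode) {		/* first write: clone the default */
		t->hardcode = malloc(t->soft_size);
		if (!t->hardcode)
			return -ENOMEM;
		memcpy(t->hardcode, t->soft, t->soft_size);
	}
	memcpy(t->hardcode, buf, size);	/* apply the caller's override */
	return 0;
}

int main(void)
{
	static const unsigned char defaults[16];
	struct pp_tables t = { defaults, sizeof(defaults), NULL };
	unsigned char patch[4] = { 1, 2, 3, 4 };
	int ret = set_table(&t, patch, sizeof(patch));

	free(t.hardcode);
	return ret;
}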
@@ -716,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -731,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -750,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -768,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -787,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -805,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -823,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !value)
return -EINVAL;
@@ -852,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
return 0;
default:
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
}
}
@@ -875,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
return -EOPNOTSUPP;
if (!buf)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = -EOPNOTSUPP;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
- return ret;
+ return -EOPNOTSUPP;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("power profile setting is for manual dpm mode only.\n");
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
@@ -969,8 +871,6 @@ static int pp_dpm_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
if (!en) {
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
@@ -985,15 +885,12 @@ static int pp_dpm_switch_power_profile(void *handle,
if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
- if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
- mutex_unlock(&hwmgr->smu_lock);
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
return -EINVAL;
- }
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
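/*
 * The switch_power_profile path keeps one bit per requested profile in
 * workload_mask, ordered by priority; when a profile is disabled it
 * clears that bit and falls back to the highest remaining one via
 * fls() (find last set, 1-based).  Userspace sketch of that bookkeeping
 * (illustrative names; fls32 stands in for the kernel's fls):
 */
#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x)		/* 1-based index of highest set bit */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint32_t mask = 0;

	mask |= 1u << 3;		/* enable the priority-3 profile */
	mask |= 1u << 1;		/* enable the priority-1 profile */

	mask &= ~(1u << 3);		/* caller disables the 3-profile */
	printf("fall back to priority %d\n", fls32(mask) - 1);	/* -> 1 */
	return 0;
}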
@@ -1023,10 +920,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
if (limit > max_power_limit)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1043,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
if (power_type != PP_PWR_TYPE_SUSTAINED)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
-
switch (pp_limit_level) {
case PP_PWR_LIMIT_CURRENT:
*limit = hwmgr->power_limit;
@@ -1064,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
break;
}
- mutex_unlock(&hwmgr->smu_lock);
-
return ret;
}
@@ -1077,9 +968,7 @@ static int pp_display_configuration_change(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1087,15 +976,11 @@ static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!output)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_dal_power_level(hwmgr, output);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_dal_power_level(hwmgr, output);
}
static int pp_get_current_clocks(void *handle,
@@ -1109,8 +994,6 @@ static int pp_get_current_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -1123,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
if (ret) {
pr_debug("Error in phm_get_clock_info \n");
- mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1146,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1161,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_latency(void *handle,
@@ -1172,15 +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
struct pp_clock_levels_with_latency *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_voltage(void *handle,
@@ -1188,50 +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
struct pp_clock_levels_with_voltage *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
- ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
-
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- clock_ranges);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_watermarks_for_clocks_ranges(hwmgr,
+ clock_ranges);
}
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clock)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_display_clock_voltage_request(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_display_clock_voltage_request(hwmgr, clock);
}
static int pp_get_display_mode_validation_clocks(void *handle,
@@ -1245,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
clocks->level = PP_DAL_POWERLEVEL_7;
- mutex_lock(&hwmgr->smu_lock);
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
- mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1362,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1380,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1399,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1418,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1437,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1447,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
static int pp_set_active_display_count(void *handle, uint32_t count)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_active_display_count(hwmgr, count);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_active_display_count(hwmgr, count);
}
static int pp_get_asic_baco_capability(void *handle, bool *cap)
@@ -1471,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1488,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1506,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1516,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int state)
static int pp_get_ppfeature_status(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
@@ -1526,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1546,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1566,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1586,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
@@ -1603,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1620,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1630,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
struct pp_hwmgr *hwmgr = handle;
- ssize_t size;
if (!hwmgr)
return -EINVAL;
@@ -1638,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
- size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
- mutex_unlock(&hwmgr->smu_lock);
-
- return size;
+ return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}
static int pp_gfx_state_change_set(void *handle, uint32_t state)
@@ -1657,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1673,16 +1475,49 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&hwmgr->smu_lock);
if (adev->pm.smu_prv_buffer) {
amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
*size = adev->pm.smu_prv_buffer_size;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
+static void pp_pm_compute_clocks(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ struct amdgpu_device *adev = hwmgr->adev;
+ int i = 0;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with
+ * refresh rates over 120 Hz on the non-DC code.
+ */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+
+ pp_display_configuration_change(handle,
+ &adev->pm.pm_display_cfg);
+ }
+
+ pp_dpm_dispatch_tasks(handle,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ NULL);
+}
+
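/*
 * pp_pm_compute_clocks centralizes the clock re-evaluation sequence:
 * update display bandwidth, drain the ring fences, refresh the display
 * configuration (non-DC only), then dispatch DISPLAY_CONFIG_CHANGE.
 * The min_vblank_time clamp above encodes a known limitation: past
 * 120 Hz the non-DC path cannot hide an mclk switch inside vblank, so
 * it is disabled.  Sketch of that heuristic (illustrative names):
 */
#include <stdint.h>
#include <stdio.h>

struct display_cfg {
	uint32_t vrefresh;		/* Hz */
	uint32_t min_vblank_time;	/* us; 0 = no mclk switch in vblank */
};

static void clamp_vblank_for_high_refresh(struct display_cfg *cfg)
{
	if (cfg->vrefresh > 120)
		cfg->min_vblank_time = 0;	/* forbid vblank mclk switch */
}

int main(void)
{
	struct display_cfg cfg = { .vrefresh = 144, .min_vblank_time = 300 };

	clamp_vblank_for_high_refresh(&cfg);
	printf("min_vblank_time=%u\n", cfg.min_vblank_time);
	return 0;
}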
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1747,4 +1582,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_gpu_metrics = pp_get_gpu_metrics,
.gfx_state_change_set = pp_gfx_state_change_set,
.get_smu_prv_buf_details = pp_get_prv_buffer_details,
+ .pm_compute_clocks = pp_pm_compute_clocks,
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index cd99db0dc2be..e4fcbf8a7eb5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2109,7 +2109,7 @@ static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
@@ -2592,7 +2592,7 @@ static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
@@ -3295,10 +3295,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
request_ps->classification.ui_label);
data->mclk_ignore_signal = false;
- PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 03bf8f069222..b50fd4a4a3d1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1950,9 +1950,12 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
+#define WIDTH_4K 3840
+
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
+ struct amdgpu_device *adev = hwmgr->adev;
data->uvd_power_gated = bgate;
@@ -1976,6 +1979,12 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
smu8_dpm_update_uvd_dpm(hwmgr, false);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K)
+ smu8_nbdpm_pstate_enable_disable(hwmgr,
+ bgate,
+ true);
}
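/*
 * On Stoney the low-memory pstate is now toggled together with UVD
 * power gating, but only for decodes at least 4K wide, where memory
 * bandwidth actually becomes the constraint.  Sketch of the threshold
 * check (illustrative names):
 */
#include <stdbool.h>
#include <stdio.h>

#define WIDTH_4K 3840

static bool needs_low_mem_pstate_toggle(bool is_stoney, unsigned int width)
{
	return is_stoney && width >= WIDTH_4K;
}

int main(void)
{
	printf("%d %d\n",
	       needs_low_mem_pstate_toggle(true, 1920),	/* 0 */
	       needs_low_mem_pstate_toggle(true, 3840));	/* 1 */
	return 0;
}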
static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
@@ -2037,7 +2046,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
- .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
diff --git a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h
index fe3665965416..fe3665965416 100644
--- a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h
diff --git a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h
index 9b698780aed8..9b698780aed8 100644
--- a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h
index 7ae494569a60..7ae494569a60 100644
--- a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index 6e0be6027705..6e0be6027705 100644
--- a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 8ed01071fe5a..4f7f2f455301 100644
--- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -331,9 +331,6 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
- int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
- bool enable,
- bool lock);
int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -751,7 +748,6 @@ struct pp_hwmgr {
bool not_vf;
bool pm_en;
bool pp_one_vf;
- struct mutex smu_lock;
struct mutex msg_lock;
uint32_t pp_table_version;
diff --git a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h
index 6a53b7e74ccd..6a53b7e74ccd 100644
--- a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h
diff --git a/drivers/gpu/drm/amd/pm/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
index a5f2227a3971..a5f2227a3971 100644
--- a/drivers/gpu/drm/amd/pm/inc/power_state.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h
index cea65093b6ad..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h
index f49d1963fe85..f49d1963fe85 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_endian.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
index f7c41185097e..f7c41185097e 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
diff --git a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h
index c067e0925b6b..c067e0925b6b 100644
--- a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h
diff --git a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h
index 171f12b82716..171f12b82716 100644
--- a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h
index 9e837a5014c5..9e837a5014c5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h
index c498158771cc..c498158771cc 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
index fdc6b7a57bc9..fdc6b7a57bc9 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
index e14072d45918..e14072d45918 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu71.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
index 71c9b2d28640..71c9b2d28640 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu71.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h
index c0e3936d5c2e..c0e3936d5c2e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu72.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h
index 9ad1cefff79f..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu72.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h
index 2aefbb85f620..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu73.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
index c6b12a4c00db..c6b12a4c00db 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu73.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
index 5916be08a7fe..5916be08a7fe 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu74.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h
index fd10a9fa843d..fd10a9fa843d 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu74.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h
index 350889e408d2..350889e408d2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu75.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
index 771523001533..771523001533 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu75.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h
index b64e58a22ddf..b64e58a22ddf 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_common.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h
index 94bf7b649c20..94bf7b649c20 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h
index ee876745dd12..ee876745dd12 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
index 78ada9ffd508..78ada9ffd508 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h
index a0a38b8a4b1b..a0a38b8a4b1b 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu8.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h
index d758d07b6a31..d758d07b6a31 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu8.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h
index 0c37c94e9414..0c37c94e9414 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu9.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h
index 70ac4d477be2..70ac4d477be2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu9.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
index 2818c98ff5ca..2818c98ff5ca 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h
index eb0f79f9c876..701aae598b58 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h
@@ -121,7 +121,7 @@ typedef struct SMU_Task SMU_Task;
struct TOC {
uint8_t JobList[NUM_JOBLIST_ENTRIES];
- SMU_Task tasks[1];
+ SMU_Task tasks[];
};
// META DATA COMMAND Definitions
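/*
 * Converting tasks[1] to a C99 flexible array member retires the
 * one-element-array idiom: the allocation size is computed from the
 * real task count instead of over-indexing a [1] array.  Userspace
 * sketch of sizing such a struct (illustrative types; kernel code would
 * typically use the struct_size() helper for the same arithmetic):
 */
#include <stdint.h>
#include <stdlib.h>

struct task {
	uint32_t id;
};

struct toc {
	uint8_t job_list[32];
	struct task tasks[];		/* flexible array member */
};

static struct toc *toc_alloc(size_t ntasks)
{
	/* sizeof(struct toc) excludes tasks[]; add the tail explicitly */
	return malloc(sizeof(struct toc) + ntasks * sizeof(struct task));
}

int main(void)
{
	struct toc *t = toc_alloc(8);

	free(t);
	return 0;
}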
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h
index 880152c0f775..880152c0f775 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
index 5f46f1a4f38e..5f46f1a4f38e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
diff --git a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h
index 63631296d751..63631296d751 100644
--- a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h
index 715b5a168831..715b5a168831 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
index b6ffd08784e7..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h
index f985c78d746a..f985c78d746a 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h
index 0c66f0fe1aaf..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 93a1c7248e26..5ca3c422f7d4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -208,6 +208,7 @@ static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int ret;
cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
@@ -218,7 +219,8 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
- pr_info("\n failed to send message %x ret is %d\n", msg, ret);
+ dev_info(adev->dev,
+ "failed to send message %x ret is %d\n", msg,ret);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index 47b34c6ca924..88a5641465dc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -87,7 +87,7 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
if (smu10_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
+ dev_err(adev->dev, "Failed to send Message %x.\n", msg);
return 0;
}
@@ -108,7 +108,7 @@ static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
if (smu10_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
+ dev_err(adev->dev, "Failed to send Message %x.\n", msg);
return 0;
}
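/*
 * The smumgr logging conversions above replace bare printk()/pr_*()
 * with dev_err()/dev_dbg() so each message is prefixed with the exact
 * PCI device that failed -- essential on multi-GPU systems.  Rough
 * userspace analogue of the difference (names illustrative; uses the
 * common GNU ##__VA_ARGS__ extension):
 */
#include <stdio.h>

struct device {
	const char *name;		/* stand-in for struct device */
};

/* pr_err-style: no way to tell which of N GPUs this came from */
#define pr_err(fmt, ...)  fprintf(stderr, fmt, ##__VA_ARGS__)

/* dev_err-style: the message carries the device identity */
#define dev_err(dev, fmt, ...) \
	fprintf(stderr, "%s: " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
	struct device gpu0 = { "0000:03:00.0" };

	pr_err("Failed to send Message %x.\n", 0x42);
	dev_err(&gpu0, "Failed to send Message %x.\n", 0x42);
	return 0;
}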
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index aae25243eb10..5a010cd38303 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -165,6 +165,7 @@ bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int ret;
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -172,9 +173,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret == 0xFE)
- pr_debug("last message was not supported\n");
+ dev_dbg(adev->dev, "last message was not supported\n");
else if (ret != 1)
- pr_info("\n last message was failed ret is %d\n", ret);
+ dev_info(adev->dev,
+ "\nlast message was failed ret is %d\n", ret);
cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
@@ -184,9 +186,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret == 0xFE)
- pr_debug("message %x was not supported\n", msg);
+ dev_dbg(adev->dev, "message %x was not supported\n", msg);
else if (ret != 1)
- pr_info("\n failed to send message %x ret is %d \n", msg, ret);
+ dev_dbg(adev->dev,
+ "failed to send message %x ret is %d \n", msg, ret);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
index 23e5de3c4ec1..8c9bf4940dc1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
@@ -126,7 +126,7 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = smu9_wait_for_response(hwmgr);
if (ret != 1)
- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index 741fbc87467f..a5c95b180672 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
@@ -115,7 +115,7 @@ static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = vega20_wait_for_response(hwmgr);
if (ret != PPSMC_Result_OK)
- pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}
@@ -143,7 +143,7 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
ret = vega20_wait_for_response(hwmgr);
if (ret != PPSMC_Result_OK)
- pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}
@@ -520,7 +520,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
+ ret = smu_v11_0_i2c_control_init(adev);
if (ret)
goto err4;
@@ -558,7 +558,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
+ smu_v11_0_i2c_control_fini(adev);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d93d28c1af95..cd22f15e8707 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -55,11 +55,10 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed);
+ enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
-static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
@@ -68,49 +67,32 @@ static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
{
struct smu_context *smu = handle;
- int size = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu_get_pp_feature_mask(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu_get_pp_feature_mask(smu, buf);
}
static int smu_sys_set_pp_feature_mask(void *handle,
uint64_t new_mask)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_pp_feature_mask(smu, new_mask);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_pp_feature_mask(smu, new_mask);
}
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
- int ret = 0;
- struct smu_context *smu = &adev->smu;
+ if (!smu->ppt_funcs->get_gfx_off_status)
+ return -EINVAL;
- if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
- *value = smu_get_gfx_off_status(smu);
- else
- ret = -EINVAL;
+ *value = smu_get_gfx_off_status(smu);
- return ret;
+ return 0;
}
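/*
 * smu_get_status_gfxoff now takes the smu_context directly and uses an
 * early-return guard instead of an if/else accumulating ret -- the same
 * flattening applied throughout this series once the locks were gone.
 * Userspace sketch of the guard-clause shape (illustrative names):
 */
#include <errno.h>
#include <stdio.h>

struct ctx {
	int (*get_status)(void);	/* optional backend hook */
};

static int gfx_off_status(void)
{
	return 1;
}

/* guard clauses up front, one straight-line success path */
static int get_status(struct ctx *c, int *value)
{
	if (!c->get_status)
		return -EINVAL;		/* hook not implemented */
	*value = c->get_status();
	return 0;
}

int main(void)
{
	struct ctx c = { gfx_off_status };
	int v = 0;

	printf("ret=%d v=%d\n", get_status(&c, &v), v);
	return 0;
}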
int smu_set_soft_freq_range(struct smu_context *smu,
@@ -120,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -138,21 +116,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
uint32_t *min,
uint32_t *max)
{
- int ret = 0;
+ int ret = -EOPNOTSUPP;
if (!min && !max)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_ultimate_freq)
ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -184,8 +158,8 @@ static u32 smu_get_sclk(void *handle, bool low)
return clk_freq * 100;
}
-static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -204,24 +178,8 @@ static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->vcn_gate_lock);
-
- ret = smu_dpm_set_vcn_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->vcn_gate_lock);
-
- return ret;
-}
-
-static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -240,22 +198,6 @@ static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->jpeg_gate_lock);
-
- ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->jpeg_gate_lock);
-
- return ret;
-}
-
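/*
 * With the per-gate mutexes gone, each *_locked worker plus its thin
 * locking wrapper collapses into a single function; callers that used
 * to need the pre-locked variant (smu_set_default_dpm_table) now call
 * the one remaining entry point.  Generic shape of the collapse
 * (illustrative names):
 */
#include <pthread.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static int gated;

/* before: worker that assumed gate_lock was held ... */
static int set_enable_locked(int enable)
{
	gated = !enable;
	return 0;
}

/* ... plus a wrapper that existed only to take the lock */
static int set_enable_old(int enable)
{
	int ret;

	pthread_mutex_lock(&gate_lock);
	ret = set_enable_locked(enable);
	pthread_mutex_unlock(&gate_lock);
	return ret;
}

/* after: serialization moved up a layer, one function remains */
static int set_enable(int enable)
{
	gated = !enable;
	return 0;
}

int main(void)
{
	set_enable_old(1);
	return set_enable(1);
}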
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -410,7 +352,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
- if (ret) {
+ if (ret != -EOPNOTSUPP) {
smu->user_dpm_profile.fan_speed_pwm = 0;
smu->user_dpm_profile.fan_speed_rpm = 0;
smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
@@ -419,13 +361,13 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
if (smu->user_dpm_profile.fan_speed_pwm) {
ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
- if (ret)
+ if (ret != -EOPNOTSUPP)
dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
}
if (smu->user_dpm_profile.fan_speed_rpm) {
ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
- if (ret)
+ if (ret != -EOPNOTSUPP)
dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
}
}
@@ -471,10 +413,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
-
- if (!is_support_sw_smu(adev))
- return false;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
return false;
@@ -488,7 +427,6 @@ static int smu_sys_get_pp_table(void *handle,
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t powerplay_table_size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -496,18 +434,12 @@ static int smu_sys_get_pp_table(void *handle,
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- powerplay_table_size = smu_table->power_play_table_size;
-
- mutex_unlock(&smu->mutex);
-
- return powerplay_table_size;
+ return smu_table->power_play_table_size;
}
static int smu_sys_set_pp_table(void *handle,
@@ -527,12 +459,10 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- mutex_lock(&smu->mutex);
- if (!smu_table->hardcode_pptable)
- smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable) {
- ret = -ENOMEM;
- goto failed;
+ smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ if (!smu_table->hardcode_pptable)
+ return -ENOMEM;
}
memcpy(smu_table->hardcode_pptable, buf, size);
@@ -551,8 +481,6 @@ static int smu_sys_set_pp_table(void *handle,
smu->uploading_custom_pp_table = false;
-failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -578,7 +506,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
static int smu_set_funcs(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
@@ -604,6 +532,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 8):
yellow_carp_set_ppt_funcs(smu);
break;
case IP_VERSION(11, 0, 8):
@@ -630,13 +559,15 @@ static int smu_set_funcs(struct amdgpu_device *adev)
static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu;
+
+ smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
+ if (!smu)
+ return -ENOMEM;
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- mutex_init(&smu->mutex);
- mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -657,40 +588,45 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- mutex_lock(&power_gate->vcn_gate_lock);
- mutex_lock(&power_gate->jpeg_gate_lock);
-
vcn_gate = atomic_read(&power_gate->vcn_gated);
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
- ret = smu_dpm_set_vcn_enable_locked(smu, true);
+ ret = smu_dpm_set_vcn_enable(smu, true);
if (ret)
- goto err0_out;
+ return ret;
- ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+ ret = smu_dpm_set_jpeg_enable(smu, true);
if (ret)
- goto err1_out;
+ goto err_out;
ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");
- smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
-err1_out:
- smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
-err0_out:
- mutex_unlock(&power_gate->jpeg_gate_lock);
- mutex_unlock(&power_gate->vcn_gate_lock);
-
+ smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+err_out:
+ smu_dpm_set_vcn_enable(smu, !vcn_gate);
return ret;
}
+static int smu_apply_default_config_table_settings(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_get_default_config_table_settings(smu,
+ &adev->pm.config_table);
+ if (ret)
+ return ret;
+
+ return smu_set_config_table(smu, &adev->pm.config_table);
+}
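/*
 * The new helper reads the ASIC's default DriverSmuConfig into
 * adev->pm.config_table and immediately writes it back, seeding the SMU
 * with known-good defaults; smu_late_init (below) treats -EOPNOTSUPP as
 * "this ASIC has no such table" rather than a failure.  Sketch of that
 * tolerant-caller pattern (illustrative names):
 */
#include <errno.h>
#include <stdio.h>

static int apply_defaults(void)
{
	return -EOPNOTSUPP;	/* pretend the ASIC lacks the table */
}

int main(void)
{
	int ret = apply_defaults();

	if (ret && ret != -EOPNOTSUPP) {
		fprintf(stderr, "failed to apply defaults\n");
		return 1;
	}
	return 0;		/* unsupported is fine */
}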
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
smu_set_fine_grain_gfx_freq_parameters(smu);
@@ -736,10 +672,15 @@ static int smu_late_init(void *handle)
smu_get_fan_parameters(smu);
- smu_handle_task(&adev->smu,
+ smu_handle_task(smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT,
- false);
+ AMD_PP_TASK_COMPLETE_INIT);
+
+ ret = smu_apply_default_config_table_settings(smu);
+ if (ret && (ret != -EOPNOTSUPP)) {
+ dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
+ return ret;
+ }
smu_restore_dpm_user_profile(smu);
@@ -964,7 +905,7 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
+ ret = smu_i2c_init(smu);
if (ret)
return ret;
@@ -975,7 +916,7 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
{
int ret;
- smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+ smu_i2c_fini(smu);
smu_free_dummy_read_table(smu);
@@ -1015,29 +956,21 @@ static void smu_interrupt_work_fn(struct work_struct *work)
struct smu_context *smu = container_of(work, struct smu_context,
interrupt_work);
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
smu->ppt_funcs->interrupt_work(smu);
-
- mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
- mutex_init(&smu->smu_feature.mutex);
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
- bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
- mutex_init(&smu->sensor_lock);
- mutex_init(&smu->metrics_lock);
mutex_init(&smu->message_lock);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
@@ -1049,8 +982,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
- mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
- mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
@@ -1101,7 +1032,7 @@ static int smu_sw_init(void *handle)
static int smu_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
ret = smu_smc_table_sw_fini(smu);
@@ -1144,8 +1075,10 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
static int smu_smc_hw_setup(struct smu_context *smu)
{
+ struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
uint32_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
int ret = 0;
if (adev->in_suspend && smu_is_dpm_running(smu)) {
@@ -1225,6 +1158,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
if (!smu_is_dpm_running(smu))
dev_info(adev->dev, "dpm has been disabled\n");
@@ -1336,7 +1278,7 @@ static int smu_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
smu->pm_enabled = false;
@@ -1352,7 +1294,7 @@ static int smu_hw_init(void *handle)
if (smu->is_apu) {
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
}
if (!smu->pm_enabled)
@@ -1437,9 +1379,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_COUNT);
+ return 0;
default:
break;
}
@@ -1455,9 +1395,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_BACO_BIT);
+ return 0;
default:
break;
}
@@ -1469,7 +1407,6 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
ret = smu_disable_all_features_with_exception(smu,
- false,
SMU_FEATURE_BACO_BIT);
if (ret)
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
@@ -1512,7 +1449,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1531,13 +1468,19 @@ static int smu_hw_fini(void *handle)
return smu_smc_hw_cleanup(smu);
}
+static void smu_late_fini(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ kfree(smu);
+}
+
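/*
 * The smu_context stops being embedded in amdgpu_device: it is
 * kzalloc()'d in smu_early_init, published through
 * adev->powerplay.pp_handle (hence all the &adev->smu replacements in
 * this file), and freed in the new smu_late_fini hook.  Generic shape
 * of that lifetime (illustrative names):
 */
#include <stdlib.h>

struct smu {
	int pm_enabled;
};

struct dev {
	void *pp_handle;
};

static int early_init(struct dev *d)
{
	struct smu *smu = calloc(1, sizeof(*smu));	/* kzalloc analogue */

	if (!smu)
		return -1;
	d->pp_handle = smu;		/* handle replaces &adev->smu */
	return 0;
}

static void late_fini(struct dev *d)
{
	free(d->pp_handle);
	d->pp_handle = NULL;
}

int main(void)
{
	struct dev d = { 0 };

	if (early_init(&d))
		return 1;
	late_fini(&d);
	return 0;
}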
static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret;
- amdgpu_gfx_off_ctrl(smu->adev, false);
-
ret = smu_hw_fini(adev);
if (ret)
return ret;
@@ -1550,15 +1493,13 @@ static int smu_reset(struct smu_context *smu)
if (ret)
return ret;
- amdgpu_gfx_off_ctrl(smu->adev, true);
-
return 0;
}
static int smu_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
@@ -1575,7 +1516,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- smu_set_gfx_cgpg(&adev->smu, false);
+ smu_set_gfx_cgpg(smu, false);
return 0;
}
@@ -1584,7 +1525,7 @@ static int smu_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1606,7 +1547,7 @@ static int smu_resume(void *handle)
return ret;
}
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
smu->disable_uclk_switch = 0;
@@ -1630,8 +1571,6 @@ static int smu_display_configuration_change(void *handle,
if (!display_config)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
smu_set_min_dcef_deep_sleep(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
@@ -1640,8 +1579,6 @@ static int smu_display_configuration_change(void *handle,
num_of_active_display++;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1675,14 +1612,7 @@ static int smu_enable_umd_pstate(void *handle,
/* enter umd pstate, save current level, disable gfx cg*/
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
- smu_dpm_ctx->enable_umd_pstate = true;
smu_gpo_control(smu, false);
- amdgpu_device_ip_set_powergating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
smu_gfx_ulv_control(smu, false);
smu_deep_sleep_control(smu, false);
amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
@@ -1692,16 +1622,9 @@ static int smu_enable_umd_pstate(void *handle,
if (!(*level & profile_mode_mask)) {
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
- smu_dpm_ctx->enable_umd_pstate = false;
amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
smu_deep_sleep_control(smu, true);
smu_gfx_ulv_control(smu, true);
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
smu_gpo_control(smu, true);
}
}
@@ -1778,22 +1701,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed)
+ enum amd_pp_task task_id)
{
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- if (lock_needed)
- mutex_lock(&smu->mutex);
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- goto out;
+ return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1804,10 +1723,6 @@ static int smu_handle_task(struct smu_context *smu,
break;
}
-out:
- if (lock_needed)
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -1818,7 +1733,7 @@ static int smu_handle_dpm_task(void *handle,
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+ return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
@@ -1837,8 +1752,6 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (!en) {
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
@@ -1855,8 +1768,6 @@ static int smu_switch_power_profile(void *handle,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, &workload, 0);
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1864,7 +1775,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- enum amd_dpm_forced_level level;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -1872,11 +1782,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&(smu->mutex));
- level = smu_dpm_ctx->dpm_level;
- mutex_unlock(&(smu->mutex));
-
- return level;
+ return smu_dpm_ctx->dpm_level;
}
static int smu_force_performance_level(void *handle,
@@ -1892,19 +1798,12 @@ static int smu_force_performance_level(void *handle,
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu_enable_umd_pstate(smu, &level);
- if (ret) {
- mutex_unlock(&smu->mutex);
+ if (ret)
return ret;
- }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE,
- false);
-
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_READJUST_POWER_STATE);
/* reset user dpm clock state */
if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1918,16 +1817,11 @@ static int smu_force_performance_level(void *handle,
static int smu_set_display_count(void *handle, uint32_t count)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
- ret = smu_init_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_init_display_count(smu, count);
}
static int smu_force_smuclk_levels(struct smu_context *smu,
@@ -1945,8 +1839,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
return -EINVAL;
}
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
@@ -1955,8 +1847,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2015,14 +1905,10 @@ static int smu_set_mp1_state(void *handle,
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs &&
smu->ppt_funcs->set_mp1_state)
ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2038,14 +1924,10 @@ static int smu_set_df_cstate(void *handle,
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_df_cstate(smu, state);
if (ret)
dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2059,38 +1941,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
if (ret)
dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
-
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, NULL);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, NULL);
}
static int smu_set_watermarks_for_clock_ranges(void *handle,
struct pp_smu_wm_range_sets *clock_ranges)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2098,13 +1967,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
if (smu->disable_watermark)
return 0;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, clock_ranges);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, clock_ranges);
}
int smu_set_ac_dc(struct smu_context *smu)
@@ -2118,14 +1981,12 @@ int smu_set_ac_dc(struct smu_context *smu)
if (smu->dc_controlled_by_gpio)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_power_source(smu,
smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (ret)
dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
smu->adev->pm.ac_power ? "AC" : "DC");
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2138,6 +1999,7 @@ const struct amd_ip_funcs smu_ip_funcs = {
.sw_fini = smu_sw_fini,
.hw_init = smu_hw_init,
.hw_fini = smu_hw_fini,
+ .late_fini = smu_late_fini,
.suspend = smu_suspend,
.resume = smu_resume,
.is_idle = NULL,
@@ -2146,7 +2008,6 @@ const struct amd_ip_funcs smu_ip_funcs = {
.soft_reset = NULL,
.set_clockgating_state = smu_set_clockgating_state,
.set_powergating_state = smu_set_powergating_state,
- .enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
@@ -2212,13 +2073,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_gfx_cgpg)
ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2230,21 +2087,21 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_speed_rpm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->set_fan_speed_rpm) {
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
- smu->user_dpm_profile.fan_speed_rpm = speed;
+ if (speed == U32_MAX)
+ return -EINVAL;
- /* Override custom PWM setting as they cannot co-exist */
- smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
- smu->user_dpm_profile.fan_speed_pwm = 0;
- }
- }
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = speed;
- mutex_unlock(&smu->mutex);
+ /* Override custom PWM settings as they cannot co-exist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ }
return ret;
}
@@ -2301,8 +2158,6 @@ int smu_get_power_limit(void *handle,
break;
}
- mutex_lock(&smu->mutex);
-
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
@@ -2336,8 +2191,6 @@ int smu_get_power_limit(void *handle,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2350,21 +2203,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- goto out;
- }
+ if (smu->ppt_funcs->set_power_limit)
+ return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!limit)
@@ -2376,9 +2224,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
smu->user_dpm_profile.power_limit = limit;
}
-out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2389,21 +2234,14 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->print_clk_levels)
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
- mutex_unlock(&smu->mutex);
-
return ret;
}
-static int smu_print_ppclk_levels(void *handle,
- enum pp_clock_type type,
- char *buf)
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
- struct smu_context *smu = handle;
enum smu_clk_type clk_type;
switch (type) {
@@ -2436,12 +2274,45 @@ static int smu_print_ppclk_levels(void *handle,
case OD_CCLK:
clk_type = SMU_OD_CCLK; break;
default:
- return -EINVAL;
+ clk_type = SMU_CLK_COUNT; break;
}
+ return clk_type;
+}
+
+static int smu_print_ppclk_levels(void *handle,
+ enum pp_clock_type type,
+ char *buf)
+{
+ struct smu_context *smu = handle;
+ enum smu_clk_type clk_type;
+
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
return smu_print_smuclk_levels(smu, clk_type, buf);
}
+static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
+{
+ struct smu_context *smu = handle;
+ enum smu_clk_type clk_type;
+
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->ppt_funcs->emit_clk_levels)
+ return -ENOENT;
+
+ return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
+
+}
+
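smu_emit_ppclk_levels deliberately returns -ENOENT when an ASIC has no emit_clk_levels hook, so a caller can fall back to the legacy print path. A hedged caller sketch (the real consumer would live in amdgpu_pm.c; the names below are assumptions):

    /* Sketch only: try the incremental emit path, else fall back. */
    int offset = 0;
    int ret = smu_emit_ppclk_levels(smu, PP_SCLK, buf, &offset);

    if (ret == -ENOENT)
            return smu_print_ppclk_levels(smu, PP_SCLK, buf);

    return ret ?: offset;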
static int smu_od_edit_dpm_table(void *handle,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -2452,14 +2323,10 @@ static int smu_od_edit_dpm_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->od_edit_dpm_table) {
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2483,8 +2350,6 @@ static int smu_read_sensor(void *handle,
size_val = *size_arg;
size = &size_val;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->read_sensor)
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
goto unlock;
@@ -2499,7 +2364,7 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
- ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+ ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
*size = 8;
break;
case AMDGPU_PP_SENSOR_UVD_POWER:
@@ -2525,8 +2390,6 @@ static int smu_read_sensor(void *handle,
}
unlock:
- mutex_unlock(&smu->mutex);
-
// assign uint32_t to int
*size_arg = size_val;
@@ -2536,7 +2399,6 @@ unlock:
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->get_power_profile_mode)
@@ -2544,13 +2406,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
if (!buf)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
- ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}
static int smu_set_power_profile_mode(void *handle,
@@ -2558,76 +2414,66 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- smu_bump_power_profile_mode(smu, param, param_size);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_bump_power_profile_mode(smu, param, param_size);
}
-static u32 smu_get_fan_control_mode(void *handle)
+static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct smu_context *smu = handle;
- u32 ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return AMD_FAN_CTRL_NONE;
+ return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_control_mode)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_control_mode)
- ret = smu->ppt_funcs->get_fan_control_mode(smu);
+ if (!fan_mode)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
- return ret;
+ return 0;
}
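Returning an int (with the mode passed back through a pointer) lets callers distinguish "no fan control" from a real mode value. An illustrative consumer, assuming a sysfs show-style caller:

    u32 mode;
    int err = smu_get_fan_control_mode(smu, &mode);

    if (err)
            return err; /* -EOPNOTSUPP when the ASIC has no fan control */

    return sysfs_emit(buf, "%u\n", mode);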
-static int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(void *handle, u32 value)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_control_mode)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->set_fan_control_mode) {
- ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.fan_mode = value;
- }
+ if (value == U32_MAX)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+ if (ret)
+ goto out;
- /* reset user dpm fan speed */
- if (!ret && value != AMD_FAN_CTRL_MANUAL &&
- !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.fan_speed_pwm = 0;
- smu->user_dpm_profile.fan_speed_rpm = 0;
- smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.fan_mode = value;
+
+ /* reset user dpm fan speed */
+ if (value != AMD_FAN_CTRL_MANUAL) {
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+ }
}
+out:
return ret;
}
-static void smu_pp_set_fan_control_mode(void *handle, u32 value)
-{
- struct smu_context *smu = handle;
-
- smu_set_fan_control_mode(smu, value);
-}
-
-
static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
struct smu_context *smu = handle;
@@ -2636,12 +2482,13 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_speed_pwm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_speed_pwm)
- ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
+ if (!speed)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
return ret;
}
@@ -2654,22 +2501,22 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_speed_pwm)
+ return -EOPNOTSUPP;
+
+ if (speed == U32_MAX)
+ return -EINVAL;
- if (smu->ppt_funcs->set_fan_speed_pwm) {
- ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
- smu->user_dpm_profile.fan_speed_pwm = speed;
+ ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = speed;
- /* Override custom RPM setting as they cannot co-exist */
- smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
- smu->user_dpm_profile.fan_speed_rpm = 0;
- }
+ /* Override custom RPM settings as they cannot co-exist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2681,12 +2528,13 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_speed_rpm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_speed_rpm)
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+ if (!speed)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
return ret;
}
@@ -2694,18 +2542,11 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_min_dcef_deep_sleep(smu, clk);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_min_dcef_deep_sleep(smu, clk);
}
static int smu_get_clock_by_type_with_latency(void *handle,
@@ -2719,8 +2560,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_clock_by_type_with_latency) {
switch (type) {
case amd_pp_sys_clock:
@@ -2737,15 +2576,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
break;
default:
dev_err(smu->adev->dev, "Invalid clock type!\n");
- mutex_unlock(&smu->mutex);
return -EINVAL;
}
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2758,13 +2594,9 @@ static int smu_display_clock_voltage_request(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_clock_voltage_request)
ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2778,13 +2610,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_disable_memory_clock_switch)
ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2797,13 +2625,9 @@ static int smu_set_xgmi_pstate(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_xgmi_pstate)
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
- mutex_unlock(&smu->mutex);
-
if (ret)
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
@@ -2813,21 +2637,16 @@ static int smu_set_xgmi_pstate(void *handle,
static int smu_get_baco_capability(void *handle, bool *cap)
{
struct smu_context *smu = handle;
- int ret = 0;
*cap = false;
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
*cap = smu->ppt_funcs->baco_is_support(smu);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return 0;
}
static int smu_baco_set_state(void *handle, int state)
@@ -2839,20 +2658,11 @@ static int smu_baco_set_state(void *handle, int state)
return -EOPNOTSUPP;
if (state == 0) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_exit)
ret = smu->ppt_funcs->baco_exit(smu);
-
- mutex_unlock(&smu->mutex);
} else if (state == 1) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_enter)
ret = smu->ppt_funcs->baco_enter(smu);
-
- mutex_unlock(&smu->mutex);
-
} else {
return -EINVAL;
}
@@ -2871,13 +2681,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
ret = smu->ppt_funcs->mode1_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2888,13 +2694,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
ret = smu->ppt_funcs->mode2_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2905,13 +2707,9 @@ int smu_mode1_reset(struct smu_context *smu)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode1_reset)
ret = smu->ppt_funcs->mode1_reset(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2923,13 +2721,9 @@ static int smu_mode2_reset(void *handle)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode2_reset)
ret = smu->ppt_funcs->mode2_reset(smu);
- mutex_unlock(&smu->mutex);
-
if (ret)
dev_err(smu->adev->dev, "Mode2 reset failed!\n");
@@ -2945,13 +2739,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2965,13 +2755,9 @@ static int smu_get_uclk_dpm_states(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_uclk_dpm_states)
ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2983,13 +2769,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_current_power_state)
pm_state = smu->ppt_funcs->get_current_power_state(smu);
- mutex_unlock(&smu->mutex);
-
return pm_state;
}
@@ -3002,20 +2784,15 @@ static int smu_get_dpm_clock_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_clock_table)
ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
struct smu_context *smu = handle;
- ssize_t size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -3023,13 +2800,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
if (!smu->ppt_funcs->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu->ppt_funcs->get_gpu_metrics(smu, table);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
static int smu_enable_mgpu_fan_boost(void *handle)
@@ -3040,13 +2811,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->enable_mgpu_fan_boost)
ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -3056,10 +2823,8 @@ static int smu_gfx_state_change_set(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->gfx_state_change_set)
ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3068,10 +2833,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->smu_handle_passthrough_sbr)
ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3080,11 +2843,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
int ret = -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs &&
smu->ppt_funcs->get_ecc_info)
ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
- mutex_unlock(&smu->mutex);
return ret;
@@ -3101,24 +2862,23 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&smu->mutex);
if (memory_pool->bo) {
*addr = memory_pool->cpu_addr;
*size = memory_pool->size;
}
- mutex_unlock(&smu->mutex);
return 0;
}
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
- .set_fan_control_mode = smu_pp_set_fan_control_mode,
+ .set_fan_control_mode = smu_set_fan_control_mode,
.get_fan_control_mode = smu_get_fan_control_mode,
.set_fan_speed_pwm = smu_set_fan_speed_pwm,
.get_fan_speed_pwm = smu_get_fan_speed_pwm,
.force_clock_level = smu_force_ppclk_levels,
.print_clock_levels = smu_print_ppclk_levels,
+ .emit_clock_levels = smu_emit_ppclk_levels,
.force_performance_level = smu_force_performance_level,
.read_sensor = smu_read_sensor,
.get_performance_level = smu_get_performance_level,
@@ -3165,17 +2925,13 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg)
{
int ret = -EINVAL;
- struct smu_context *smu = &adev->smu;
- if (smu->ppt_funcs->wait_for_event) {
- mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->wait_for_event)
ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
- mutex_unlock(&smu->mutex);
- }
return ret;
}
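Callers now pass the smu_context instead of the amdgpu_device; a converted call would look roughly like this (sketch; SMU_EVENT_RESET_COMPLETE is assumed to remain available after the enum moves out of this header):

    struct smu_context *smu = adev->powerplay.pp_handle;

    smu_wait_for_event(smu, SMU_EVENT_RESET_COMPLETE, 0);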
@@ -3203,7 +2959,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
unsigned char *buf;
int r;
@@ -3228,7 +2984,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t
loff_t *pos)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!filp->private_data)
@@ -3269,7 +3025,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu->stb_context.stb_buf_size)
return;
@@ -3281,5 +3037,14 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
&smu_stb_debugfs_fops,
smu->stb_context.stb_buf_size);
#endif
+}
+
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
+{
+ int ret = 0;
+ if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
+ ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
+
+ return ret;
}
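smu_send_hbm_bad_pages_num is a thin guard around the per-ASIC hook; a hedged sketch of a RAS-side caller (the variable bad_page_count is an assumption):

    struct smu_context *smu = adev->powerplay.pp_handle;
    int ret = smu_send_hbm_bad_pages_num(smu, bad_page_count);

    if (ret)
            dev_warn(adev->dev, "failed to report %u bad HBM pages\n",
                     bad_page_count);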
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ba7565bc8104..fbef3ab8d487 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -241,11 +241,6 @@ struct smu_user_dpm_profile {
uint32_t clk_dependency;
};
-enum smu_event_type {
-
- SMU_EVENT_RESET_COMPLETE = 0,
-};
-
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
do { \
tables[table_id].size = s; \
@@ -342,6 +337,7 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
void *ecc_table;
+ void *driver_smu_config_table;
struct smu_table tables[SMU_TABLE_COUNT];
/*
* The driver table is just a staging buffer for
@@ -368,7 +364,6 @@ struct smu_dpm_context {
uint32_t dpm_context_size;
void *dpm_context;
void *golden_dpm_context;
- bool enable_umd_pstate;
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level requested_dpm_level;
@@ -382,8 +377,6 @@ struct smu_power_gate {
bool vce_gated;
atomic_t vcn_gated;
atomic_t jpeg_gated;
- struct mutex vcn_gate_lock;
- struct mutex jpeg_gate_lock;
};
struct smu_power_context {
@@ -398,8 +391,6 @@ struct smu_feature
uint32_t feature_num;
DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
- DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
- struct mutex mutex;
};
struct smu_clocks {
@@ -436,7 +427,6 @@ enum smu_baco_state
struct smu_baco_context
{
- struct mutex mutex;
uint32_t state;
bool platform_support;
};
@@ -494,9 +484,6 @@ struct smu_context
const struct cmn2asic_mapping *table_map;
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
- struct mutex mutex;
- struct mutex sensor_lock;
- struct mutex metrics_lock;
struct mutex message_lock;
uint64_t pool_size;
@@ -618,10 +605,24 @@ struct pptable_funcs {
* to buffer. Star current level.
*
* Used for sysfs interfaces.
+ * Return: Number of characters written to the buffer
*/
int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
/**
+ * @emit_clk_levels: Print DPM clock levels for a clock domain
+ * to buffer using sysfs_emit_at. Star current level.
+ *
+ * Used for sysfs interfaces.
+ * &buf: sysfs buffer
+ * &offset: offset within buffer to start printing, which is updated by the
+ * function.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
+
+ /**
* @force_clk_levels: Set a range of allowed DPM levels for a clock
* domain.
* &clk_type: Clock domain.
@@ -829,12 +830,12 @@ struct pptable_funcs {
* other devices. The i2c's EEPROM also stores bad page tables on boards
* with ECC.
*/
- int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+ int (*i2c_init)(struct smu_context *smu);
/**
* @i2c_fini: Tear down i2c.
*/
- void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+ void (*i2c_fini)(struct smu_context *smu);
/**
* @get_unique_id: Get the GPU's unique id. Used for asset tracking.
@@ -988,10 +989,9 @@ struct pptable_funcs {
/**
* @get_enabled_mask: Get a mask of features that are currently enabled
* on the SMU.
- * &feature_mask: Array representing enabled feature mask.
- * &num: Elements in &feature_mask.
+ * &feature_mask: Enabled feature mask.
*/
- int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask);
/**
* @feature_is_enabled: Test if a feature is enabled.
@@ -1005,7 +1005,6 @@ struct pptable_funcs {
* exception to those in &mask.
*/
int (*disable_all_features_with_exception)(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask);
/**
@@ -1283,6 +1282,16 @@ struct pptable_funcs {
* @stb_collect_info: Collects Smart Trace Buffers data.
*/
int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
+
+ /**
+ * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
+ */
+ int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
+
+ /**
+ * @set_config_table: Apply the input DriverSmuConfig table settings.
+ */
+ int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
};
typedef enum {
@@ -1395,10 +1404,6 @@ int smu_mode1_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
-extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
-
bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);
@@ -1413,15 +1418,15 @@ int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
-
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h
index ab66a4b9e438..ab66a4b9e438 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
index 45f5d29bc705..45f5d29bc705 100644
--- a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
index 43d43d6addc0..43d43d6addc0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h
index 4884a4e1f261..4884a4e1f261 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
index 04752ade1016..04752ade1016 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 63b8701fd466..b253be602cc2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x3B
+#define SMU11_DRIVER_IF_VERSION 0x40
#define PPTABLE_Sienna_Cichlid_SMU_VERSION 7
@@ -172,6 +172,7 @@ typedef enum {
#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000
#define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000
#define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000
+#define DPM_OVERRIDE_ENABLE_eGPU_USB_WA 0x00008000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -263,7 +264,22 @@ typedef enum {
#define LED_DISPLAY_ERROR_BIT 2
//RLC Pace Table total number of levels
-#define RLC_PACE_TABLE_NUM_LEVELS 16
+#define RLC_PACE_TABLE_NUM_LEVELS 16
+#define SIENNA_CICHLID_UMC_CHANNEL_NUM 16
+
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[SIENNA_CICHLID_UMC_CHANNEL_NUM];
+} EccInfoTable_t;
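EccInfoTable_t is laid out as one EccInfo_t record per UMC channel, so its size must track the channel count. A compile-time check of that invariant could look like this (sketch, not part of the interface header):

    /* One EccInfo_t record per channel; no extra padding expected. */
    BUILD_BUG_ON(sizeof(EccInfoTable_t) !=
                 SIENNA_CICHLID_UMC_CHANNEL_NUM * sizeof(EccInfo_t));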
typedef enum {
DRAM_BIT_WIDTH_DISABLED = 0,
@@ -283,6 +299,7 @@ typedef enum {
#define MAX_SW_I2C_COMMANDS 24
+
typedef enum {
I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
@@ -1672,7 +1689,8 @@ typedef struct {
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_PACE 10
-#define TABLE_COUNT 11
+#define TABLE_ECCINFO 11
+#define TABLE_COUNT 12
typedef struct {
float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS];
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
index 8361ebd8d876..8361ebd8d876 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h
index e9315eb5b48e..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 0f67c56c2863..0f67c56c2863 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
index 25540cb28208..25540cb28208 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
index d2e10a724560..d2e10a724560 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h
index 26181b679098..26181b679098 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h
index 22edd88b8117..22edd88b8117 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
index fe130a497d6c..fe130a497d6c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h
index bd4fcb6b9610..bd4fcb6b9610 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h
index 909a86aa60f3..909a86aa60f3 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h
index 9ac9f3bd3664..9ac9f3bd3664 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h
index c5e26d619bf0..c5e26d619bf0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h
index fc9198846e70..fc9198846e70 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
index beab6d7b28b7..beab6d7b28b7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index ff8a0bcbd290..ff8a0bcbd290 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index acb3be292096..acb3be292096 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
index 247c6e9632ba..247c6e9632ba 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
index 7a63cf8e85ed..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index 1ad2dff71090..1ad2dff71090 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 44af23ae059e..44af23ae059e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
index 1f311396b706..1f311396b706 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 505d2fb94fd9..201563072189 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
#include "smu11_driver_if_arcturus.h"
#include "soc15_common.h"
#include "atom.h"
-#include "power_state.h"
#include "arcturus_ppt.h"
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
@@ -57,8 +57,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
[smu_feature] = {1, (arcturus_feature)}
@@ -603,15 +601,11 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -694,8 +688,6 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1120,7 +1112,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1181,7 +1172,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -2031,15 +2021,12 @@ static void arcturus_dump_pptable(struct smu_context *smu)
static bool arcturus_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -2071,18 +2058,23 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -2118,9 +2110,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -2161,28 +2153,60 @@ static const struct i2c_adapter_quirks arcturus_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int arcturus_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_algo;
+ control->quirks = &arcturus_i2c_control_quirks;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &arcturus_i2c_algo;
- control->quirks = &arcturus_i2c_control_quirks;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void arcturus_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
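With one adapter registered per SMU I2C port, EEPROM users are expected to reach the hardware through the cached bus pointers instead of a shared adapter. An illustrative consumer (msgs and num are assumptions):

    /* Sketch: RAS EEPROM traffic over the dedicated port-0 bus. */
    struct i2c_adapter *adap = adev->pm.ras_eeprom_i2c_bus;

    if (adap)
            r = i2c_transfer(adap, msgs, num);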
static void arcturus_get_unique_id(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 2238ee19c222..b3a0f3fb3e65 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -125,22 +125,6 @@ static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
return smu_v11_0_init_smc_tables(smu);
}
-static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- kfree(smu_table->metrics_table);
- smu_table->metrics_table = NULL;
-
- kfree(smu_table->gpu_metrics_table);
- smu_table->gpu_metrics_table = NULL;
- smu_table->gpu_metrics_table_size = 0;
-
- smu_table->metrics_time = 0;
-
- return 0;
-}
-
static int
cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
@@ -150,13 +134,9 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -200,8 +180,6 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -215,8 +193,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
-
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
@@ -267,8 +243,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->sensor_lock);
-
return ret;
}
@@ -376,20 +350,16 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32);
-
/*
* cyan_skillfish specific, query default sclk instead of hardcoding it.
*/
@@ -552,6 +522,36 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int cyan_skillfish_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t low, high;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ low = CYAN_SKILLFISH_SCLK_MIN;
+ high = CYAN_SKILLFISH_SCLK_MAX;
+ break;
+ default:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &low);
+ if (ret)
+ return ret;
+ high = low;
+ break;
+ }
+
+ if (min)
+ *min = low;
+ if (max)
+ *max = high;
+
+ return 0;
+}
+
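A quick usage sketch for the new hook; either output pointer may be NULL, as the guards above allow (the dev_info consumer is illustrative):

    uint32_t min, max;

    if (!cyan_skillfish_get_dpm_ultimate_freq(smu, SMU_SCLK, &min, &max))
            dev_info(smu->adev->dev, "sclk range: %u..%u MHz\n", min, max);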
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -559,12 +559,14 @@ static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.init_smc_tables = cyan_skillfish_init_smc_tables,
- .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
.read_sensor = cyan_skillfish_read_sensor,
.print_clk_levels = cyan_skillfish_print_clk_levels,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
.is_dpm_running = cyan_skillfish_is_dpm_running,
.get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
.od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
+ .get_dpm_ultimate_freq = cyan_skillfish_get_dpm_ultimate_freq,
.register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 2bb7816b245a..5f22fc3430f4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -57,8 +58,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -511,6 +510,8 @@ static int navi10_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
GFP_KERNEL);
@@ -527,8 +528,15 @@ static int navi10_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->driver_smu_config_table =
+ kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+ if (!smu_table->driver_smu_config_table)
+ goto err3_out;
+
return 0;
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -546,15 +554,11 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -624,8 +628,6 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -638,15 +640,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -719,8 +717,6 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -733,15 +729,11 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -811,8 +803,6 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -825,15 +815,11 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -906,8 +892,6 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1261,6 +1245,215 @@ static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_tabl
*max = od_table->max[setting];
}
+static int navi10_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf,
+ int *offset)
+{
+ uint16_t *curve_settings;
+ int ret = 0;
+ uint32_t cur_value = 0, value = 0;
+ uint32_t freq_values[3] = {0};
+ uint32_t i, levels, mark_index = 0, count = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+ uint32_t min_value, max_value;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_DCEFCLK:
+ ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
+ if (ret)
+ return ret;
+
+ if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ for (i = 0; i < count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type, i, &value);
+ if (ret)
+ return ret;
+
+ *offset += sysfs_emit_at(buf, *offset,
+ "%d: %uMhz %s\n",
+ i, value,
+ cur_value == value ? "*" : "");
+ }
+ } else {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type, 0, &freq_values[0]);
+ if (ret)
+ return ret;
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ count - 1,
+ &freq_values[2]);
+ if (ret)
+ return ret;
+
+ freq_values[1] = cur_value;
+ mark_index = cur_value == freq_values[0] ? 0 :
+ cur_value == freq_values[2] ? 2 : 1;
+
+ levels = 3;
+ if (mark_index != 1) {
+ levels = 2;
+ freq_values[1] = freq_values[2];
+ }
+
+ for (i = 0; i < levels; i++) {
+ *offset += sysfs_emit_at(buf, *offset,
+ "%d: %uMhz %s\n",
+ i, freq_values[i],
+ i == mark_index ? "*" : "");
+ }
+ }
+ break;
+ case SMU_PCIE:
+ gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+ lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ *offset += sysfs_emit_at(buf, *offset, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ }
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_SCLK:\n0: %uMhz\n1: %uMhz\n",
+ od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_MCLK:\n1: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
+ i, curve_settings[0],
+ curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ *offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ min_value, max_value);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
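Unlike navi10_print_clk_levels(), which returns the byte count it wrote, the new emit variant threads a caller-owned offset through sysfs_emit_at(), so multiple clock domains can be appended into a single sysfs page. A sketch of the intended calling pattern (hypothetical caller; buf is the usual PAGE_SIZE sysfs buffer):

    int offset = 0, ret;

    ret = navi10_emit_clk_levels(smu, SMU_SCLK, buf, &offset);
    if (ret)
    	return ret;
    /* offset now points past the SCLK lines; the next domain appends */
    ret = navi10_emit_clk_levels(smu, SMU_MCLK, buf, &offset);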
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -1649,8 +1842,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
@@ -1664,15 +1857,12 @@ static int navi10_display_config_changed(struct smu_context *smu)
static bool navi10_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
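smu_cmn_get_enabled_mask() now returns the feature mask as one 64-bit value, so the hand-rolled recombination of two 32-bit halves disappears from every is_dpm_running() in this series. Before and after, as a sketch:

    uint64_t feature_enabled;
    int ret;

    /* before (old signature):
     *	uint32_t feature_mask[2];
     *	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
     *	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
     */

    /* after: a single 64-bit out-parameter, no recombination */
    ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);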
@@ -1888,13 +2078,13 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
@@ -1988,7 +2178,6 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -2048,7 +2237,6 @@ static int navi10_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -2708,20 +2896,14 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2778,18 +2960,23 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -2825,9 +3012,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -2867,28 +3054,60 @@ static const struct i2c_adapter_quirks navi10_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int navi10_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &navi10_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &navi10_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &navi10_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
- control->quirks = &navi10_i2c_control_quirks;
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void navi10_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
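With the bus context attached via i2c_set_adapdata() at init time, the transfer path recovers the owning device and controller port without the old container_of() macro, which could only describe one embedded adapter. The lookup in navi10_i2c_xfer(), reduced to its essentials (fragment of the function above):

    struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
    struct amdgpu_device *adev = smu_i2c->adev;

    /* route the software-I2C request to the port backing this adapter */
    req->I2CcontrollerPort = smu_i2c->port;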
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
@@ -2900,20 +3119,14 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2978,20 +3191,14 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3059,20 +3266,14 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3229,6 +3430,54 @@ static int navi10_post_smu_init(struct smu_context *smu)
return ret;
}
+static int navi10_get_default_config_table_settings(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ if (!table)
+ return -EINVAL;
+
+ table->gfxclk_average_tau = 10;
+ table->socclk_average_tau = 10;
+ table->uclk_average_tau = 10;
+ table->gfx_activity_average_tau = 10;
+ table->mem_activity_average_tau = 10;
+ table->socket_power_average_tau = 10;
+
+ return 0;
+}
+
+static int navi10_set_config_table(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ DriverSmuConfig_t driver_smu_config_table;
+
+ if (!table)
+ return -EINVAL;
+
+ memset(&driver_smu_config_table,
+ 0,
+ sizeof(driver_smu_config_table));
+
+ driver_smu_config_table.GfxclkAverageLpfTau =
+ table->gfxclk_average_tau;
+ driver_smu_config_table.SocclkAverageLpfTau =
+ table->socclk_average_tau;
+ driver_smu_config_table.UclkAverageLpfTau =
+ table->uclk_average_tau;
+ driver_smu_config_table.GfxActivityLpfTau =
+ table->gfx_activity_average_tau;
+ driver_smu_config_table.UclkActivityLpfTau =
+ table->mem_activity_average_tau;
+ driver_smu_config_table.SocketPowerLpfTau =
+ table->socket_power_average_tau;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_DRIVER_SMU_CONFIG,
+ 0,
+ (void *)&driver_smu_config_table,
+ true);
+}
+
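The DriverSmuConfig table carries low-pass-filter time constants (tau) that firmware applies when averaging the reported clocks, activity, and socket power; navi10 defaults them all to 10 and uploads the table through smu_cmn_update_table(). A hypothetical driver-side use of the two new hooks (the tau value below is purely illustrative):

    struct config_table_setting table;
    int ret;

    ret = navi10_get_default_config_table_settings(smu, &table);
    if (ret)
    	return ret;

    /* e.g. average socket power over a longer window */
    table.socket_power_average_tau = 100;

    ret = navi10_set_config_table(smu, &table);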
static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
@@ -3237,6 +3486,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.i2c_init = navi10_i2c_control_init,
.i2c_fini = navi10_i2c_control_fini,
.print_clk_levels = navi10_print_clk_levels,
+ .emit_clk_levels = navi10_emit_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
.populate_umd_state_clk = navi10_populate_umd_state_clk,
.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
@@ -3317,6 +3567,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.post_init = navi10_post_smu_init,
.interrupt_work = smu_v11_0_interrupt_work,
.set_mp1_state = smu_cmn_set_mp1_state,
+ .get_default_config_table_settings = navi10_get_default_config_table_settings,
+ .set_config_table = navi10_set_config_table,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 777f717c37ae..d9d634ce9575 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -46,6 +47,7 @@
#include "mp/mp_11_0_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "amdgpu_ras.h"
#include "smu_cmn.h"
/*
@@ -58,8 +60,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -83,6 +83,12 @@
/* STB FIFO depth is in 64bit units */
#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
+/*
+ * SMU supports ECCTABLE since version 58.70.0;
+ * use this to check whether the ECCTABLE feature is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_VERSION 0x003a4600
+
static int get_table_size(struct smu_context *smu)
{
if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
@@ -225,6 +231,7 @@ static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(OVERDRIVE),
TAB_MAP(I2C_COMMANDS),
TAB_MAP(PACE),
+ TAB_MAP(ECCINFO),
};
static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -341,7 +348,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
if (smu->dc_controlled_by_gpio)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
- if (amdgpu_aspm)
+ if (amdgpu_device_should_use_aspm(adev))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
return 0;
@@ -421,6 +428,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
return 0;
}
+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t *board_reserved;
+ uint16_t *freq_table_gfx;
+ uint32_t i;
+
+ /* Fix some OEM SKU specific stability issues */
+ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0xC3) &&
+ (adev->pdev->subsystem_device == 0x16C2) &&
+ (adev->pdev->subsystem_vendor == 0x1043))
+ board_reserved[0] = 1387;
+
+ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0xC3) &&
+ ((adev->pdev->subsystem_device == 0x16C2) ||
+ (adev->pdev->subsystem_device == 0x133C)) &&
+ (adev->pdev->subsystem_vendor == 0x1043)) {
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
+ if (freq_table_gfx[i] > 2500)
+ freq_table_gfx[i] = 2500;
+ }
+ }
+
+ return 0;
+}
+
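The quirk matches the full PCI identity (device, revision, subsystem vendor and device), so only the affected OEM boards are patched: one SKU gets a BoardReserved fix-up and two share a 2500 MHz gfxclk cap. The repeated ID test could be collapsed into a helper along these lines (hypothetical, not part of the patch):

    static bool match_quirked_sku(struct pci_dev *pdev, u16 subsys_device)
    {
    	return pdev->device == 0x73DF && pdev->revision == 0xC3 &&
    	       pdev->subsystem_vendor == 0x1043 &&
    	       pdev->subsystem_device == subsys_device;
    }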
static int sienna_cichlid_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -441,7 +478,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- return ret;
+ return sienna_cichlid_patch_pptable_quirk(smu);
}
static int sienna_cichlid_tables_init(struct smu_context *smu)
@@ -466,6 +503,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfigExternal_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -481,8 +522,21 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
+ smu_table->driver_smu_config_table =
+ kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+ if (!smu_table->driver_smu_config_table)
+ goto err4_out;
+
return 0;
+err4_out:
+ kfree(smu_table->ecc_table);
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -525,15 +579,11 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -633,8 +683,6 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1036,10 +1084,6 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (ret)
goto print_clk_out;
- /* no need to disable gfxoff when retrieving the current gfxclk */
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, false);
-
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
goto print_clk_out;
@@ -1168,25 +1212,18 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, true);
-
return size;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, false);
-
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1220,9 +1257,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
}
forec_level_out:
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, true);
-
return 0;
}
@@ -1238,21 +1272,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_BEIGE_GOBY:
+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -1287,8 +1337,8 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
#if 0
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
@@ -1304,15 +1354,12 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -1527,13 +1574,13 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
@@ -1625,7 +1672,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
GET_PPTABLE_MEMBER(FanMaximumRpm, &temp);
@@ -1686,7 +1732,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1865,16 +1910,7 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
- ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
- return ret;
+ return smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
static void sienna_cichlid_dump_od_table(struct smu_context *smu,
@@ -3458,18 +3494,23 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 1;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -3505,9 +3546,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -3547,28 +3588,61 @@ static const struct i2c_adapter_quirks sienna_cichlid_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
-
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &sienna_cichlid_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
- control->quirks = &sienna_cichlid_i2c_control_quirks;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &sienna_cichlid_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &sienna_cichlid_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+ /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+ /* XXX ideally this would be something in a vbios data table */
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
@@ -3588,14 +3662,11 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- &metrics_external,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics_external,
+ true);
+ if (ret)
return ret;
- }
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
@@ -3685,8 +3756,6 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
smu_v11_0_get_current_pcie_link_speed(smu);
}
- mutex_unlock(&smu->metrics_lock);
-
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
@@ -3694,16 +3763,70 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int sienna_cichlid_check_ecc_table_support(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return -EOPNOTSUPP;
+
+ if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static ssize_t sienna_cichlid_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = sienna_cichlid_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
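The ECC path is gated on firmware 58.70.0: once the gate passes, get_ecc_info() pulls SMU_TABLE_ECCINFO into the cached ecc_table and copies it out per UMC channel. SUPPORT_ECCTABLE_SMU_VERSION packs that version in the usual byte layout, which can be checked by decoding the constant:

    uint32_t v = 0x003a4600;		/* SUPPORT_ECCTABLE_SMU_VERSION */
    uint8_t major = (v >> 16) & 0xff;	/* 0x3a = 58 */
    uint8_t minor = (v >> 8) & 0xff;	/* 0x46 = 70 */
    uint8_t debug = v & 0xff;		/* 0x00 = 0  => "58.70.0" */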
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
+ uint16_t *mgpu_fan_boost_limit_rpm;
+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
- if (!smc_pptable->MGpuFanBoostLimitRpm)
+ if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -3719,7 +3842,7 @@ static int sienna_cichlid_gpo_control(struct smu_context *smu,
int ret = 0;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
@@ -3832,9 +3955,61 @@ static void sienna_cichlid_stb_init(struct smu_context *smu)
}
-int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
- void *buf,
- uint32_t size)
+static int sienna_cichlid_get_default_config_table_settings(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!table)
+ return -EINVAL;
+
+ table->gfxclk_average_tau = 10;
+ table->socclk_average_tau = 10;
+ table->fclk_average_tau = 10;
+ table->uclk_average_tau = 10;
+ table->gfx_activity_average_tau = 10;
+ table->mem_activity_average_tau = 10;
+ table->socket_power_average_tau = 100;
+ if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ table->apu_socket_power_average_tau = 100;
+
+ return 0;
+}
+
+static int sienna_cichlid_set_config_table(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ DriverSmuConfigExternal_t driver_smu_config_table;
+
+ if (!table)
+ return -EINVAL;
+
+ memset(&driver_smu_config_table,
+ 0,
+ sizeof(driver_smu_config_table));
+ driver_smu_config_table.DriverSmuConfig.GfxclkAverageLpfTau =
+ table->gfxclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.FclkAverageLpfTau =
+ table->fclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.UclkAverageLpfTau =
+ table->uclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.GfxActivityLpfTau =
+ table->gfx_activity_average_tau;
+ driver_smu_config_table.DriverSmuConfig.UclkActivityLpfTau =
+ table->mem_activity_average_tau;
+ driver_smu_config_table.DriverSmuConfig.SocketPowerLpfTau =
+ table->socket_power_average_tau;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_DRIVER_SMU_CONFIG,
+ 0,
+ (void *)&driver_smu_config_table,
+ true);
+}
+
+static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
+ void *buf,
+ uint32_t size)
{
uint32_t *p = buf;
struct amdgpu_device *adev = smu->adev;
@@ -3945,6 +4120,9 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.gpo_control = sienna_cichlid_gpo_control,
.set_mp1_state = sienna_cichlid_set_mp1_state,
.stb_collect_info = sienna_cichlid_stb_get_data_direct,
+ .get_ecc_info = sienna_cichlid_get_ecc_info,
+ .get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
+ .set_config_table = sienna_cichlid_set_config_table,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 38cd0ece24f6..42f705c7a36f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 4e9e2cf39859..b87f550af26b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -225,15 +225,15 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -287,9 +287,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
}
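The top byte of the firmware version word is now a program identifier rather than part of major, so major shrinks from 16 bits to 8; smu_v12_0 and smu_v13_0 below apply the same decode. For an illustrative version word of 0x2A3C0A00 the split would be:

    uint32_t smu_version = 0x2A3C0A00;			/* illustrative */
    uint8_t smu_program = (smu_version >> 24) & 0xff;	/* 0x2A = 42 */
    uint8_t smu_major = (smu_version >> 16) & 0xff;	/* 0x3C = 60 */
    uint8_t smu_minor = (smu_version >> 8) & 0xff;	/* 0x0A = 10 */
    uint8_t smu_debug = smu_version & 0xff;		/* 0x00 = 0, i.e. program 42, v60.10.0 */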
@@ -473,8 +473,12 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
+ kfree(smu_table->driver_smu_config_table);
+ kfree(smu_table->ecc_table);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
+ smu_table->driver_smu_config_table = NULL;
+ smu_table->ecc_table = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
@@ -796,30 +800,8 @@ failed:
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
- int ret = 0;
-
- ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures), NULL);
- if (ret)
- return ret;
-
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (en) {
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
- }
-
- return ret;
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
}
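Dropping the bitmap bookkeeping works because feature state is no longer cached in smu->smu_feature at enable time; whoever needs it queries the firmware through smu_cmn_get_enabled_mask(). Sketch of the on-demand query (SMC_DPM_FEATURE is the per-ASIC mask defined in the ppt files above):

    uint64_t mask;
    bool dpm_running = false;

    if (!smu_cmn_get_enabled_mask(smu, &mask))
    	dpm_running = !!(mask & SMC_DPM_FEATURE);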
int smu_v11_0_notify_display_change(struct smu_context *smu)
@@ -1372,7 +1354,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
@@ -1441,7 +1423,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
@@ -1615,13 +1597,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- enum smu_baco_state baco_state;
-
- mutex_lock(&smu_baco->mutex);
- baco_state = smu_baco->state;
- mutex_unlock(&smu_baco->mutex);
- return baco_state;
+ return smu_baco->state;
}
#define D3HOT_BACO_SEQUENCE 0
@@ -1638,8 +1615,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (smu_v11_0_baco_get_state(smu) == state)
return 0;
- mutex_lock(&smu_baco->mutex);
-
if (state == SMU_BACO_STATE_ENTER) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(11, 0, 7):
@@ -1680,18 +1655,16 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
} else {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
- goto out;
+ return ret;
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
}
- if (ret)
- goto out;
- smu_baco->state = state;
-out:
- mutex_unlock(&smu_baco->mutex);
+ if (!ret)
+ smu_baco->state = state;
+
return ret;
}
@@ -1798,7 +1771,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1811,9 +1783,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_id < 0)
return clk_id;
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
-
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1831,9 +1800,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
}
out:
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5cb07ed227fb..5551e1426ef5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -273,15 +273,11 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -335,8 +331,6 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -348,15 +342,11 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -410,8 +400,6 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -512,21 +500,17 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -1400,7 +1384,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
static int vangogh_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
- int ret = 0;
+ int ret = 0, i;
uint32_t soc_mask, mclk_mask, fclk_mask;
uint32_t vclk_mask = 0, dclk_mask = 0;
@@ -1494,6 +1478,24 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
+ if (smu->adev->pm.fw_version >= 0x43f1b00) {
+ for (i = 0; i < smu->cpu_core_num; i++) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ ((i << 20)
+ | smu->cpu_actual_soft_min_freq),
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ ((i << 20)
+ | smu->cpu_actual_soft_max_freq),
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
return ret;
}
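On SMU firmware 0x43f1b00 and newer, the per-core CCLK soft limits are reapplied after a performance-level change; each message packs the core index above bit 20 of the parameter with the frequency in the low bits. The packing in isolation (values illustrative):

    uint32_t core = 2;			/* CPU core index */
    uint32_t freq = 1400;		/* soft-min CCLK in MHz */
    uint32_t param = (core << 20) | freq;

    /* sent once per core, for both SMU_MSG_SetSoftMinCclk and SMU_MSG_SetSoftMaxCclk */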
@@ -1506,7 +1508,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = vangogh_common_get_smu_metrics_data(smu,
@@ -1568,7 +1569,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1965,30 +1965,13 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
int ret = 0;
if (adev->pm.fw_version >= 0x43f1700 && !en)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
RLC_STATUS_OFF, NULL);
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (!en)
- return ret;
-
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
-
- return 0;
+ return ret;
}
static int vangogh_post_smu_init(struct smu_context *smu)
@@ -2003,7 +1986,7 @@ static int vangogh_post_smu_init(struct smu_context *smu)
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
/* the allow message will be sent after the enable message on Vangogh */
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
if (ret) {
@@ -2196,7 +2179,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
.is_dpm_running = vangogh_is_dpm_running,
.read_sensor = vangogh_read_sensor,
- .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_watermarks_table = vangogh_set_watermarks_table,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 25c4b135f830..e99e7b2bf25b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1128,15 +1128,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -1201,8 +1197,6 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1215,7 +1209,6 @@ static int renoir_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = renoir_get_smu_metrics_data(smu,
@@ -1283,7 +1276,6 @@ static int renoir_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 9c91e79c955f..56a02bc60cee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -74,15 +74,15 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -98,9 +98,9 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 4885c4ae78b7..890acc4e2cb8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
#include "smu13_driver_if_aldebaran.h"
#include "soc15_common.h"
#include "atom.h"
-#include "power_state.h"
#include "aldebaran_ppt.h"
#include "smu_v13_0_pptable.h"
#include "aldebaran_ppsmc.h"
@@ -57,8 +57,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
[smu_feature] = {1, (aldebaran_feature)}
@@ -572,15 +570,11 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -654,8 +648,6 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1148,7 +1140,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
@@ -1187,7 +1178,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1460,32 +1450,34 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
static bool aldebaran_is_dpm_running(struct smu_context *smu)
{
int ret;
- uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
}
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -1521,9 +1513,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -1563,28 +1555,53 @@ static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int aldebaran_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
+ struct i2c_adapter *control = &smu_i2c->adapter;
int res;
+ smu_i2c->adev = adev;
+ smu_i2c->port = 0;
+ mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &aldebaran_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
control->quirks = &aldebaran_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
res = i2c_add_adapter(control);
- if (res)
+ if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ i2c_del_adapter(control);
return res;
}
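/*
 * Sketch of the adapdata round trip this rework relies on (not part of the
 * patch): init stores the per-bus context on the adapter, and the transfer
 * path recovers it instead of the old to_amdgpu_device() lookup:
 *
 *	i2c_set_adapdata(control, smu_i2c);      // in _control_init()
 *	smu_i2c = i2c_get_adapdata(i2c_adap);    // in _i2c_xfer()
 *	req->I2CcontrollerPort = smu_i2c->port;  // per-bus controller port
 */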
-static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
static void aldebaran_get_unique_id(struct smu_context *smu)
@@ -1594,17 +1611,14 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
uint32_t upper32 = 0, lower32 = 0;
int ret;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
if (ret)
- goto out_unlock;
+ goto out;
upper32 = metrics->PublicSerialNumUpper32;
lower32 = metrics->PublicSerialNumLower32;
-out_unlock:
- mutex_unlock(&smu->metrics_lock);
-
+out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
if (adev->serial[0] == '\0')
sprintf(adev->serial, "%016llx", adev->unique_id);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index b54790d3483e..df6cbb7feef7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -198,15 +198,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -218,6 +218,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 8):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
default:
@@ -229,8 +230,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
/* only for dGPU w/ SMU13 */
if (adev->pm.fw)
- dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
- smu_version, smu_major, smu_minor, smu_debug);
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
/*
* 1. if_version mismatch is not critical as our fw is designed
@@ -242,9 +243,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(adev->dev, "SMU driver if version not matched\n");
}
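/*
 * Byte layout of the 32-bit smu_version word after this change (sketch):
 *
 *	31       24 23       16 15        8 7         0
 *	[ program ] [  major  ] [  minor  ] [  debug  ]
 *
 * e.g. smu_version = 0x00442D00 decodes to program 0, version 68.45.0
 * (an illustrative value, not taken from real firmware).
 */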
@@ -722,25 +723,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
- mutex_lock(&feature->mutex);
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
- goto failed;
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
- goto failed;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0], NULL);
- if (ret)
- goto failed;
+ return ret;
-failed:
- mutex_unlock(&feature->mutex);
- return ret;
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
}
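/*
 * Sketch (not in the patch): the 64-bit allowed bitmap is still sent as two
 * dwords, high word first, matching the message pair used above:
 *
 *	feature_mask[1] -> SMU_MSG_SetAllowedFeaturesMaskHigh
 *	feature_mask[0] -> SMU_MSG_SetAllowedFeaturesMaskLow
 *
 * The bitmap_copy() into a uint32_t[2] relies on the little-endian word
 * layout assumed throughout this code.
 */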
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
@@ -768,30 +765,8 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
- int ret = 0;
-
- ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures), NULL);
- if (ret)
- return ret;
-
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (en) {
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
- }
-
- return ret;
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
}
int smu_v13_0_notify_display_change(struct smu_context *smu)
@@ -1200,7 +1175,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
@@ -1275,7 +1250,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
@@ -1321,11 +1296,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x7:
/*
@@ -1533,7 +1508,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1546,9 +1520,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_id < 0)
return clk_id;
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
-
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1566,9 +1537,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
}
out:
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
return ret;
}
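/*
 * Sketch of the message encoding used above (unchanged by this patch):
 * soft-limit requests pack the clock ID and frequency (MHz) into one dword:
 *
 *	param = (uint32_t)((clk_id << 16) | (freq_mhz & 0xffff));
 *
 * e.g. clk_id 2 at 1800 MHz -> 0x00020708.
 */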
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..e2d099409123 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -195,30 +195,13 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
- uint32_t feature_mask[2];
int ret = 0;
if (!en && !adev->in_s0ix)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (!en)
- return ret;
-
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
-
- return 0;
+ return ret;
}
static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
@@ -255,16 +238,13 @@ static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
static bool yellow_carp_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -282,14 +262,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
+ int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
@@ -310,13 +285,9 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -387,8 +358,6 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -401,7 +370,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = yellow_carp_get_smu_metrics_data(smu,
@@ -469,7 +437,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1182,7 +1149,7 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
- .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index ee1a312fd497..e9d4b82755dd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -481,7 +481,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- int ret = 0;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
@@ -491,22 +490,27 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->supported);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, feature->supported);
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
+ uint64_t enabled_features;
int feature_id;
- int ret = 0;
- if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
+ if (smu_cmn_get_enabled_mask(smu, &enabled_features)) {
+ dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
+ return 0;
+ }
+
+ /*
+ * Renoir and Cyan Skillfish are assumed to have all features enabled.
+ * Since no feature_map is available for them, the check here also
+ * avoids an unwanted feature_map lookup below.
+ */
+ if (enabled_features == ULLONG_MAX)
return 1;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -515,13 +519,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
if (feature_id < 0)
return 0;
- WARN_ON(feature_id > feature->feature_num);
-
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->enabled);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, (unsigned long *)&enabled_features);
}
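/*
 * Usage sketch (illustrative): querying a single feature now costs one
 * firmware round trip plus a bit test:
 *
 *	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
 *		...
 *
 * test_bit() on the (unsigned long *) cast of a uint64_t assumes the 64-bit
 * little-endian configurations this driver targets; the cached
 * feature->enabled bitmap and its mutex are no longer consulted here.
 */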
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
@@ -552,70 +550,62 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
}
int smu_cmn_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
+ uint64_t *feature_mask)
{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
- }
-
- return ret;
-}
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- uint32_t feature_mask_en_low = 0;
- uint32_t feature_mask_en_high = 0;
- struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t *feature_mask_high;
+ uint32_t *feature_mask_low;
int ret = 0;
- if (!feature_mask || num < 2)
+ if (!feature_mask)
return -EINVAL;
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
- &feature_mask_en_low);
+ feature_mask_low = &((uint32_t *)feature_mask)[0];
+ feature_mask_high = &((uint32_t *)feature_mask)[1];
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ /* For Vangogh and Yellow Carp */
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 8):
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 0,
+ feature_mask_low);
if (ret)
return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
- &feature_mask_en_high);
-
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 1,
+ feature_mask_high);
+ break;
+ /*
+ * For Cyan Skillfish and Renoir, PMFW provides no interface to retrieve
+ * the enabled features, so all features are assumed enabled.
+ * TODO: add other APU ASICs which suffer from the same issue here
+ */
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ memset(feature_mask, 0xff, sizeof(*feature_mask));
+ break;
+ /* other dGPU ASICs */
+ default:
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ feature_mask_high);
if (ret)
return ret;
- feature_mask[0] = feature_mask_en_low;
- feature_mask[1] = feature_mask_en_high;
-
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ feature_mask_low);
+ break;
}
return ret;
-
}
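/*
 * Design note (sketch, not in the patch): feature_mask_low/high alias the
 * two halves of the caller's uint64_t, roughly:
 *
 *	uint64_t mask;
 *	uint32_t *lo = &((uint32_t *)&mask)[0];   // bits 31:0
 *	uint32_t *hi = &((uint32_t *)&mask)[1];   // bits 63:32
 *
 * so the two PMFW replies land directly in the right halves on the
 * little-endian platforms assumed here.
 */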
uint64_t smu_cmn_get_indep_throttler_status(
@@ -635,7 +625,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
{
- struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (enabled) {
@@ -649,8 +638,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
} else {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
@@ -662,19 +649,8 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
}
- mutex_lock(&feature->mutex);
- if (enabled)
- bitmap_or(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- else
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
return ret;
}
@@ -682,7 +658,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable)
{
- struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -691,8 +666,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
if (feature_id < 0)
return -EINVAL;
- WARN_ON(feature_id > feature->feature_num);
-
return smu_cmn_feature_update_enable_state(smu,
1ULL << feature_id,
enable);
@@ -715,29 +688,21 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
- uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_mask;
int feature_index = 0;
uint32_t count = 0;
int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
int ret = 0, i;
+ int feature_id;
- if (!smu->is_apu) {
- ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- } else {
- ret = smu_cmn_get_enabled_32_bits_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- }
+ ret = smu_cmn_get_enabled_mask(smu,
+ &feature_mask);
+ if (ret)
+ return 0;
size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
- feature_mask[1], feature_mask[0]);
+ upper_32_bits(feature_mask), lower_32_bits(feature_mask));
memset(sort_feature, -1, sizeof(sort_feature));
@@ -758,11 +723,18 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
if (sort_feature[i] < 0)
continue;
+ /* convert to asic specific feature ID */
+ feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ sort_feature[i]);
+ if (feature_id < 0)
+ continue;
+
size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
smu_get_feature_name(smu, sort_feature[i]),
i,
- !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+ !!test_bit(feature_id, (unsigned long *)&feature_mask) ?
"enabled" : "disabled");
}
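/*
 * Example of the sysfs text emitted above (illustrative values and
 * feature names only):
 *
 *	features high: 0x00000623 low: 0xb37afe3d
 *	00. DPM_PREFETCHER       ( 0) : enabled
 *	01. DPM_GFXCLK           ( 1) : enabled
 *	02. DPM_UCLK             ( 3) : disabled
 *	...
 */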
@@ -773,22 +745,17 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask)
{
int ret = 0;
- uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_mask;
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
- uint64_t feature_enables = 0;
ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
+ &feature_mask);
if (ret)
return ret;
- feature_enables = ((uint64_t)feature_mask[1] << 32 |
- (uint64_t)feature_mask[0]);
-
- feature_2_enabled = ~feature_enables & new_mask;
- feature_2_disabled = feature_enables & ~new_mask;
+ feature_2_enabled = ~feature_mask & new_mask;
+ feature_2_disabled = feature_mask & ~new_mask;
if (feature_2_enabled) {
ret = smu_cmn_feature_update_enable_state(smu,
@@ -814,9 +781,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* @mask
*
* @smu: smu_context pointer
- * @no_hw_disablement: whether real dpm disablement should be performed
- * true: update the cache(about dpm enablement state) only
- * false: real dpm disablement plus cache update
* @mask: the dpm feature which should not be disabled
* SMU_FEATURE_COUNT: no exception, all dpm features
* to disable
@@ -825,10 +789,8 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* 0 on success or a negative error code on failure.
*/
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
uint64_t features_to_disable = U64_MAX;
int skipped_feature_id;
@@ -842,18 +804,9 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
features_to_disable &= ~(1ULL << skipped_feature_id);
}
- if (no_hw_disablement) {
- mutex_lock(&feature->mutex);
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
- return 0;
- } else {
- return smu_cmn_feature_update_enable_state(smu,
- features_to_disable,
- 0);
- }
+ return smu_cmn_feature_update_enable_state(smu,
+ features_to_disable,
+ 0);
}
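/*
 * Sketch: the exception mask above is built by clearing one bit out of an
 * all-ones word, e.g. keeping a single feature alive while everything else
 * goes down:
 *
 *	features_to_disable = U64_MAX & ~(1ULL << skipped_feature_id);
 *
 * (skipped_feature_id comes from the CMN2ASIC feature mapping.)
 */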
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -964,9 +917,9 @@ int smu_cmn_write_pptable(struct smu_context *smu)
true);
}
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size =
@@ -994,21 +947,6 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
return 0;
}
-int smu_cmn_get_metrics_table(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
-{
- int ret = 0;
-
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- metrics_table,
- bypass_cache);
- mutex_unlock(&smu->metrics_lock);
-
- return ret;
-}
-
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
struct metrics_table_header *header = (struct metrics_table_header *)table;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index beea03810bca..a4c593ed8b03 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,10 @@
#include "amdgpu_smu.h"
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
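/*
 * Note (not part of the patch): FDO is the fan duty output control mode;
 * STATIC appears to select a fixed PWM duty cycle and STATIC_RPM a fixed
 * target RPM, matching how the SMU fan-speed helpers program the fan.
 */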
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg_index,
uint32_t param);
@@ -54,12 +58,7 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type);
int smu_cmn_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num);
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num);
+ uint64_t *feature_mask);
uint64_t smu_cmn_get_indep_throttler_status(
const unsigned long dep_status,
@@ -80,7 +79,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask);
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask);
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -97,10 +95,6 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu);
int smu_cmn_write_pptable(struct smu_context *smu);
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache);
-
int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 59f9cfff3d61..5f21ead860f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -55,9 +55,9 @@
#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
#define smu_init_display_count(smu, count) smu_ppt_funcs(init_display_count, 0, smu, count)
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
-#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
+#define smu_feature_get_enabled_mask(smu, mask) smu_ppt_funcs(get_enabled_mask, -EOPNOTSUPP, smu, mask)
#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
-#define smu_disable_all_features_with_exception(smu, no_hw_disablement, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, no_hw_disablement, mask)
+#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
@@ -78,8 +78,8 @@
#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu)
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
-#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control)
-#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
+#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu)
+#define smu_i2c_fini(smu) smu_ppt_funcs(i2c_fini, 0, smu)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
#define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
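/*
 * Expansion sketch (assuming the smu_ppt_funcs() helper defined earlier in
 * this header): each wrapper NULL-checks the ppt_funcs callback and falls
 * back to the second macro argument, roughly:
 *
 *	smu->ppt_funcs->get_enabled_mask ?
 *		smu->ppt_funcs->get_enabled_mask(smu, mask) : -EOPNOTSUPP;
 *
 * which is why unimplemented callbacks can now report -EOPNOTSUPP.
 */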
@@ -91,6 +91,8 @@
#define smu_post_init(smu) smu_ppt_funcs(post_init, 0, smu)
#define smu_gpo_control(smu, enablement) smu_ppt_funcs(gpo_control, 0, smu, enablement)
#define smu_set_fine_grain_gfx_freq_parameters(smu) smu_ppt_funcs(set_fine_grain_gfx_freq_parameters, 0, smu)
+#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
+#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#endif
#endif