author | 2020-06-08 04:47:57 +0000
committer | 2020-06-08 04:47:57 +0000
commit | c349dbc7938c71a30e13c1be4acc1976165f4630 (patch)
tree | 8798187dfd7a927a15123e8dad31b782b074baa8 /sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c
parent | The errcheck() function treats an errno of ERANGE or EDOM as something (diff)
download | wireguard-openbsd-c349dbc7938c71a30e13c1be4acc1976165f4630.tar.xz
         | wireguard-openbsd-c349dbc7938c71a30e13c1be4acc1976165f4630.zip
update drm to linux 5.7
adds kernel support for
amdgpu: vega20, raven2, renoir, navi10, navi14
inteldrm: icelake, tigerlake
Thanks to the OpenBSD Foundation for sponsoring this work, kettenis@ for
helping, patrick@ for helping adapt rockchip drm and many developers for
testing.
Diffstat (limited to 'sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c')
-rw-r--r-- | sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c | 366
1 file changed, 281 insertions, 85 deletions
diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c
index 4bf6f212447..89fffd4cb6e 100644
--- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_dpm.c
@@ -22,7 +22,6 @@
  * Authors: Alex Deucher
  */
 
-#include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_i2c.h"
@@ -184,61 +183,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 	return vrefresh;
 }
 
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
-			      u32 *p, u32 *u)
-{
-	u32 b_c = 0;
-	u32 i_c;
-	u32 tmp;
-
-	i_c = (i * r_c) / 100;
-	tmp = i_c >> p_b;
-
-	while (tmp) {
-		b_c++;
-		tmp >>= 1;
-	}
-
-	*u = (b_c + 1) / 2;
-	*p = i_c / (1 << (2 * (*u)));
-}
-
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
-	u32 k, a, ah, al;
-	u32 t1;
-
-	if ((fl == 0) || (fh == 0) || (fl > fh))
-		return -EINVAL;
-
-	k = (100 * fh) / fl;
-	t1 = (t * (k - 100));
-	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
-	a = (a + 5) / 10;
-	ah = ((a * t) + 5000) / 10000;
-	al = a - ah;
-
-	*th = t - ah;
-	*tl = t + al;
-
-	return 0;
-}
-
-bool amdgpu_is_uvd_state(u32 class, u32 class2)
-{
-	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
-		return true;
-	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
-		return true;
-	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
-		return true;
-	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
-		return true;
-	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
-		return true;
-	return false;
-}
-
 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 {
 	switch (sensor) {
@@ -951,46 +895,298 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
 	return AMDGPU_PCIE_GEN1;
 }
 
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
-				 u16 asic_lanes,
-				 u16 default_lanes)
+struct amd_vce_state*
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (idx < adev->pm.dpm.num_of_vce_states)
+		return &adev->pm.dpm.vce_states[idx];
+
+	return NULL;
+}
+
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
-	switch (asic_lanes) {
-	case 0:
+	uint32_t clk_freq;
+	int ret = 0;
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
+					     low ? &clk_freq : NULL,
+					     !low ? &clk_freq : NULL,
+					     true);
+		if (ret)
+			return 0;
+		return clk_freq * 100;
+
+	} else {
+		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
+	}
+}
+
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+	uint32_t clk_freq;
+	int ret = 0;
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
+					     low ? &clk_freq : NULL,
+					     !low ? &clk_freq : NULL,
+					     true);
+		if (ret)
+			return 0;
+		return clk_freq * 100;
+
+	} else {
+		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+	}
+}
+
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+{
+	int ret = 0;
+	bool swsmu = is_support_sw_smu(adev);
+
+	switch (block_type) {
+	case AMD_IP_BLOCK_TYPE_UVD:
+	case AMD_IP_BLOCK_TYPE_VCE:
+		if (swsmu) {
+			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+		} else if (adev->powerplay.pp_funcs &&
+			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
+			/*
+			 * TODO: need a better lock mechanism
+			 *
+			 * Here adev->pm.mutex lock protection is enforced on
+			 * UVD and VCE cases only. Since for other cases, there
+			 * may be already lock protection in amdgpu_pm.c.
+			 * This is a quick fix for the deadlock issue below.
+			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
+			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
+			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+			 *     ocltst          D    0  2028   2026 0x00000000
+			 *     Call Trace:
+			 *     __schedule+0x2c0/0x870
+			 *     schedule+0x2c/0x70
+			 *     schedule_preempt_disabled+0xe/0x10
+			 *     __mutex_lock.isra.9+0x26d/0x4e0
+			 *     __mutex_lock_slowpath+0x13/0x20
+			 *     ? __mutex_lock_slowpath+0x13/0x20
+			 *     mutex_lock+0x2f/0x40
+			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
+			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
+			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
+			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
+			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
+			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
+			 */
+			mutex_lock(&adev->pm.mutex);
+			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+				(adev)->powerplay.pp_handle, block_type, gate));
+			mutex_unlock(&adev->pm.mutex);
+		}
+		break;
+	case AMD_IP_BLOCK_TYPE_GFX:
+	case AMD_IP_BLOCK_TYPE_VCN:
+	case AMD_IP_BLOCK_TYPE_SDMA:
+		if (swsmu)
+			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+		else if (adev->powerplay.pp_funcs &&
+			 adev->powerplay.pp_funcs->set_powergating_by_smu)
+			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+				(adev)->powerplay.pp_handle, block_type, gate));
+		break;
+	case AMD_IP_BLOCK_TYPE_JPEG:
+		if (swsmu)
+			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_GMC:
+	case AMD_IP_BLOCK_TYPE_ACP:
+		if (adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->set_powergating_by_smu)
+			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+				(adev)->powerplay.pp_handle, block_type, gate));
+		break;
 	default:
-		return default_lanes;
-	case 1:
-		return 1;
-	case 2:
-		return 2;
-	case 4:
-		return 4;
-	case 8:
-		return 8;
-	case 12:
-		return 12;
-	case 16:
-		return 16;
+		break;
+	}
+
+	return ret;
+}
+
+int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 	}
+
+	return ret;
 }
 
-u8 amdgpu_encode_pci_lane_width(u32 lanes)
+int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 {
-	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_exit(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
 
-	if (lanes > 16)
-		return 0;
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+	}
 
-	return encoded_lanes[lanes];
+	return ret;
 }
 
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
+int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
+			     enum pp_mp1_state mp1_state)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
 
-	if (idx < adev->pm.dpm.num_of_vce_states)
-		return &adev->pm.dpm.vce_states[idx];
+	if (is_support_sw_smu(adev)) {
+		ret = smu_set_mp1_state(&adev->smu, mp1_state);
+	} else if (adev->powerplay.pp_funcs &&
+		   adev->powerplay.pp_funcs->set_mp1_state) {
+		ret = adev->powerplay.pp_funcs->set_mp1_state(
+				adev->powerplay.pp_handle,
+				mp1_state);
+	}
 
-	return NULL;
+	return ret;
+}
+
+bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	bool baco_cap;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_baco_is_support(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+			return false;
+
+		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+			return false;
+
+		return baco_cap ? true : false;
+	}
+}
+
+int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+
+	if (is_support_sw_smu(adev)) {
+		return smu_mode2_reset(smu);
+	} else {
+		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+			return -ENOENT;
+
+		return pp_funcs->asic_reset_mode_2(pp_handle);
+	}
+}
+
+int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
+{
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	dev_info(adev->dev, "GPU BACO reset\n");
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_baco_enter(smu);
+		if (ret)
+			return ret;
+
+		ret = smu_baco_exit(smu);
+		if (ret)
+			return ret;
+	} else {
+		if (!pp_funcs
+		    || !pp_funcs->set_asic_baco_state)
+			return -ENOENT;
+
+		/* enter BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+		if (ret)
+			return ret;
+
+		/* exit BACO state */
+		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_switch_power_profile(&adev->smu, type, en);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->switch_power_profile)
+		ret = adev->powerplay.pp_funcs->switch_power_profile(
+			adev->powerplay.pp_handle, type, en);
+
+	return ret;
+}
+
+int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
+			       uint32_t pstate)
+{
+	int ret = 0;
+
+	if (is_support_sw_smu_xgmi(adev))
+		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+	else if (adev->powerplay.pp_funcs &&
+		 adev->powerplay.pp_funcs->set_xgmi_pstate)
+		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+								pstate);
+
+	return ret;
+}
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+			     uint32_t cstate)
+{
+	int ret = 0;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_set_df_cstate(smu, cstate);
+	else if (pp_funcs &&
+		 pp_funcs->set_df_cstate)
+		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+	return ret;
 }