author    Peter Zijlstra <peterz@infradead.org>  2020-11-26 13:16:55 +0100
committer Peter Zijlstra <peterz@infradead.org>  2020-11-26 13:16:55 +0100
commit    20c7775aecea04d8ca322039969d49dcf568e0e9 (patch)
tree      138c057839197c9021043353e994815c0250e669 /drivers/gpu/drm/amd/amdgpu
parent    perf/x86/intel: Add event constraint for CYCLE_ACTIVITY.STALLS_MEM_ANY (diff)
parent    Merge tag 'media/v5.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media (diff)
Merge remote-tracking branch 'origin/master' into perf/core
Further perf/core patches will depend on d3f7b1bb2040 ("mm/gup: fix gup_fast with dynamic page table folding"), which is already in Linus' tree.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 110
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 221
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 216
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 851
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_df.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 1218
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 548
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 134
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 43
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 98
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 112
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 43
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 3928
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 234
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c | 127
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h (renamed from drivers/gpu/drm/amd/amdgpu/cik_dpm.h) | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 315
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 138
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 382
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 67
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 251
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 102
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 126
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 221
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 276
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 94
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 96
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 104
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 94
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 79
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 226
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 76
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 6
-rw-r--r-- [-rwxr-xr-x] drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 117
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 134
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 296
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 533
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 3382
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_dpm.h | 229
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/kv_smc.c | 218
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 7
-rw-r--r-- [-rwxr-xr-x] drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 189
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 81
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ppsmc.h | 200
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 61
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/r600_dpm.h | 127
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 111
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 8079
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.h | 1015
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_smc.c | 273
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sislands_smc.h | 423
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 92
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ta_rap_if.h | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 331
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.h | 51
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 28
171 files changed, 6234 insertions(+), 22388 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 403ec3db29df..39976c7b100c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
- -I$(FULL_AMD_PATH)/powerplay/inc \
+ -I$(FULL_AMD_PATH)/pm/inc \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
@@ -47,7 +47,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
- amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
@@ -55,15 +55,15 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
- amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
-amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
uvd_v3_1.o
amdgpu-y += \
@@ -85,7 +85,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o umc_v6_0.o
+ umc_v6_1.o umc_v6_0.o umc_v8_7.o
# add IH block
amdgpu-y += \
@@ -105,10 +105,6 @@ amdgpu-y += \
psp_v11_0.o \
psp_v12_0.o
-# add SMC block
-amdgpu-y += \
- amdgpu_dpm.o
-
# add DCE block
amdgpu-y += \
dce_v10_0.o \
@@ -212,7 +208,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
-include $(FULL_AMD_PATH)/powerplay/Makefile
+include $(FULL_AMD_PATH)/pm/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 327a0daf4a1d..87f095dc385c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -49,6 +49,8 @@
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -102,6 +104,7 @@
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#define MAX_GPU_INSTANCE 16
@@ -178,6 +181,7 @@ extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
+extern int amdgpu_bad_page_threshold;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
@@ -187,9 +191,11 @@ extern int amdgpu_force_asic_type;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
+extern bool no_system_mem_limit;
#else
static const int sched_policy = KFD_SCHED_POLICY_HWS;
static const bool debug_evictions; /* = false */
+static const bool no_system_mem_limit;
#endif
extern int amdgpu_tmz;
@@ -201,6 +207,7 @@ extern int amdgpu_si_support;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
+extern int amdgpu_num_kcq;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -212,6 +219,8 @@ extern int amdgpu_cik_support;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
+#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
+
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -245,6 +254,7 @@ struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;
+struct amdgpu_hive_info;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -611,6 +621,8 @@ struct amdgpu_asic_funcs {
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
/* device supports BACO */
bool (*supports_baco)(struct amdgpu_device *adev);
+ /* pre asic_init quirks */
+ void (*pre_asic_init)(struct amdgpu_device *adev);
};
/*
@@ -648,16 +660,6 @@ struct amdgpu_atcs {
};
/*
- * Firmware VRAM reservation
- */
-struct amdgpu_fw_vram_usage {
- u64 start_offset;
- u64 size;
- struct amdgpu_bo *reserved_bo;
- void *va;
-};
-
-/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -725,13 +727,13 @@ struct amd_powerplay {
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_device {
struct device *dev;
- struct drm_device *ddev;
struct pci_dev *pdev;
+ struct drm_device ddev;
#ifdef CONFIG_DRM_AMD_ACP
struct amdgpu_acp acp;
#endif
-
+ struct amdgpu_hive_info *hive;
/* ASIC */
enum amd_asic_type asic_type;
uint32_t family;
@@ -765,7 +767,6 @@ struct amdgpu_device {
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stolen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -881,6 +882,9 @@ struct amdgpu_device {
/* mmhub */
struct amdgpu_mmhub mmhub;
+ /* gfxhub */
+ struct amdgpu_gfxhub gfxhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -917,11 +921,6 @@ struct amdgpu_device {
/* display related functionality */
struct amdgpu_display_manager dm;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
- struct amdgpu_bo *discovery_memory;
-
/* mes */
bool enable_mes;
struct amdgpu_mes mes;
@@ -946,8 +945,6 @@ struct amdgpu_device {
struct delayed_work delayed_init_work;
struct amdgpu_virt virt;
- /* firmware VRAM reservation */
- struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@@ -961,9 +958,9 @@ struct amdgpu_device {
bool in_suspend;
bool in_hibernate;
- bool in_gpu_reset;
+ atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
- struct mutex lock_reset;
+ struct rw_semaphore reset_sem;
struct amdgpu_doorbell_index doorbell_index;
struct mutex notifier_lock;
@@ -995,34 +992,60 @@ struct amdgpu_device {
atomic_t throttling_logging_enabled;
struct ratelimit_state throttling_logging_rs;
+ uint32_t ras_features;
+
+ bool in_pci_err_recovery;
+ struct pci_saved_state *pci_state;
};
+static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
+{
+ return container_of(ddev, struct amdgpu_device, ddev);
+}
+
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+ return &adev->ddev;
+}
+
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr);
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u32 reg_data);
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u64 reg_data);
+
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1033,8 +1056,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
*/
#define AMDGPU_REGS_NO_KIQ (1<<1)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
@@ -1042,9 +1065,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1090,7 +1113,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
WREG32_SMC(_Reg, tmp); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
@@ -1141,10 +1164,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
/* Common functions */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job* job);
@@ -1194,7 +1219,7 @@ static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
@@ -1258,6 +1283,15 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
+void amdgpu_pci_resume(struct pci_dev *pdev);
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+
#include "amdgpu_object.h"
/* used by df_v3_6.c and amdgpu_pmu.c */
@@ -1278,4 +1312,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
return adev->gmc.tmz_enabled;
}
+static inline int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+ return atomic_read(&adev->in_gpu_reset);
+}
#endif
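
The drm_to_adev()/adev_to_drm() helpers added above work because struct drm_device is now embedded by value in struct amdgpu_device, so container_of() recovers the outer structure with pure pointer arithmetic instead of chasing the old dev_private back-pointer. A minimal userspace sketch of the same pattern, with simplified stand-in types and a simplified container_of() rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

struct drm_device { int dev_index; };

struct amdgpu_device {
	int asic_type;
	struct drm_device ddev;	/* embedded by value, as in the patch */
};

/* Simplified; the kernel's container_of() also type-checks the member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

int main(void)
{
	struct amdgpu_device adev = { .asic_type = 42 };
	struct drm_device *ddev = &adev.ddev;

	/* Recover the containing amdgpu_device without any back-pointer. */
	printf("asic_type = %d\n", drm_to_adev(ddev)->asic_type);
	return 0;
}

One consequence of the embedded layout is that the conversion is a compile-time offset: no extra dereference, and no window during initialization where a back-pointer is still NULL.
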
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 12247a32f9ef..d3e51d361179 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -136,9 +136,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
}
return 0;
}
@@ -157,8 +155,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
}
return 0;
}
@@ -529,9 +526,7 @@ static int acp_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 913c8f0513bd..165b02e267b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -463,11 +463,11 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
- pm_runtime_get_sync(adev->ddev->dev);
+ pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ drm_helper_hpd_irq_event(adev_to_drm(adev));
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
/* TODO: check other events */
@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
adev->atif = atif;
- if (atif->notifications.brightness_change) {
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if (atif->notifications.brightness_change) {
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
@@ -817,7 +817,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
struct drm_encoder *tmp;
/* Find the encoder controlling the brightness */
- list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+ list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
head) {
struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1b865fed74ca..0544460653b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -36,6 +36,8 @@
*/
uint64_t amdgpu_amdkfd_total_mem_size;
+static bool kfd_initialized;
+
int amdgpu_amdkfd_init(void)
{
struct sysinfo si;
@@ -51,19 +53,26 @@ int amdgpu_amdkfd_init(void)
#else
ret = -ENOENT;
#endif
+ kfd_initialized = !ret;
return ret;
}
void amdgpu_amdkfd_fini(void)
{
- kgd2kfd_exit();
+ if (kfd_initialized) {
+ kgd2kfd_exit();
+ kfd_initialized = false;
+ }
}
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
bool vf = amdgpu_sriov_vf(adev);
+ if (!kfd_initialized)
+ return;
+
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
adev->pdev, adev->asic_type, vf);
@@ -119,7 +128,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GMC_HOLE_START),
- .drm_render_minor = adev->ddev->render->index,
+ .drm_render_minor = adev_to_drm(adev)->render->index,
.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
};
@@ -160,7 +169,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
}
}
@@ -479,11 +488,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
goto out_put;
obj = dma_buf->priv;
- if (obj->dev->driver != adev->ddev->driver)
+ if (obj->dev->driver != adev_to_drm(adev)->driver)
/* Can't handle buffers from different drivers */
goto out_put;
- adev = obj->dev->dev_private;
+ adev = drm_to_adev(obj->dev);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT)))
@@ -517,8 +526,9 @@ out_put:
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ return amdgpu_vram_mgr_usage(vram_man);
}
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
@@ -571,6 +581,13 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
return adev->rev_id;
}
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->gmc.noretry;
+}
+
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
@@ -612,6 +629,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
job->vmid = vmid;
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+
if (ret) {
DRM_ERROR("amdgpu: failed to schedule IB.\n");
goto err_ib_sched;
@@ -755,4 +773,8 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
+
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
#endif
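
The kfd_initialized flag introduced above is the usual guard for an optional subsystem: record whether init actually succeeded so that teardown and later probe calls degrade to safe no-ops. A generic sketch of the idea (the function names here are illustrative, not the kgd2kfd API):

#include <stdbool.h>

/* Stand-ins for kgd2kfd_init()/kgd2kfd_exit(). */
static int do_init(void) { return 0; }
static void do_exit(void) { }

static bool subsys_initialized;

int subsys_init(void)
{
	int ret = do_init();

	/* Remember success so the teardown below can be conditional. */
	subsys_initialized = !ret;
	return ret;
}

void subsys_fini(void)
{
	if (!subsys_initialized)
		return;		/* init failed or never ran: nothing to undo */
	do_exit();
	subsys_initialized = false;
}
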
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ffe149aafc39..ea391ca7f2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -181,6 +181,7 @@ uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
@@ -207,11 +208,11 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
})
/* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
- struct file *filp, unsigned int pasid,
+ struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
@@ -270,5 +271,6 @@ int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 35d4a5ab0228..1afa8f122e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -283,22 +283,6 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("trying to set page table base for wrong VMID %u\n",
- vmid);
- return;
- }
-
- mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-
- gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
-}
-
const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -317,7 +301,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base =
+ kgd_gfx_v9_set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index bf927f432506..4763bab7a4d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -32,7 +32,6 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
-#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -105,7 +104,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -542,7 +541,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
#if 0
@@ -753,7 +752,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
}
/* SDMA is on gfxhub as well for Navi1* series */
- gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
@@ -776,6 +775,4 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
- .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 7e59e473a190..50016bf9c427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -31,7 +31,6 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
-#include "gfxhub_v2_1.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -152,7 +151,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -657,7 +656,7 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
struct amdgpu_device *adev = get_amdgpu_device(kgd);
/* SDMA is on gfxhub as well for Navi1* series */
- gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
#if 0
@@ -822,7 +821,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.address_watch_get_offset = address_watch_get_offset_v10_3,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
#if 0
.enable_debug_trap = enable_debug_trap_v10_3,
.disable_debug_trap = disable_debug_trap_v10_3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 744366c7ee85..b91d27e39bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -139,7 +139,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -423,7 +423,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index feab4cc6e836..5ce0ce704a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -96,7 +96,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -419,7 +419,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
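
The kfd2kgd hunks above all make the same substitution: direct reads of the old adev->in_gpu_reset bool become calls to the new amdgpu_in_reset() helper, which is a lock-free atomic_read(). Hot paths can thus bail out of hardware access cheaply, while the reset path itself serializes against long-running users through the new reset_sem. A rough userspace approximation of that split, with a pthread rwlock standing in for the kernel's struct rw_semaphore and illustrative names throughout:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct dev_state {
	atomic_int in_gpu_reset;	/* replaces the old bool */
	pthread_rwlock_t reset_sem;	/* cf. struct rw_semaphore */
};

static struct dev_state dev = {
	.reset_sem = PTHREAD_RWLOCK_INITIALIZER,
};

static bool dev_in_reset(void)
{
	/* Cheap lockless check used on hot paths, like amdgpu_in_reset(). */
	return atomic_load(&dev.in_gpu_reset) != 0;
}

int hqd_destroy(void)
{
	if (dev_in_reset())
		return -5;	/* -EIO: hardware is being reset, don't touch it */
	/* ... program the queue-destroy sequence ... */
	return 0;
}

void gpu_recover(void)
{
	pthread_rwlock_wrlock(&dev.reset_sem);	/* exclude in-flight users */
	atomic_store(&dev.in_gpu_reset, 1);
	/* ... perform the actual reset ... */
	atomic_store(&dev.in_gpu_reset, 0);
	pthread_rwlock_unlock(&dev.reset_sem);
}
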
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c7fd0c47b254..43b18863a8b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -36,9 +36,7 @@
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
-#include "mmhub_v1_0.h"
-#include "gfxhub_v1_0.h"
-
+#include "gfx_v9_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -110,7 +108,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -195,19 +193,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t sdma_engine_reg_base[2] = {
- SOC15_REG_OFFSET(SDMA0, 0,
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA1, 0,
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
- };
- uint32_t retval = sdma_engine_reg_base[engine_id]
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
- queue_id, retval);
+ queue_id, sdma_rlc_reg_offset);
- return retval;
+ return sdma_rlc_reg_offset;
}
static inline struct v9_mqd *get_mqd(void *mqd)
@@ -539,7 +550,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
@@ -677,7 +688,7 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -688,9 +699,182 @@ static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
return;
}
- mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->srbm_mutex);
+ mutex_lock(&adev->grbm_idx_mutex);
+
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_unlock(&adev->grbm_idx_mutex);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
+ * @get_wave_count: Read device registers to get number of waves in flight for
+ * a particular queue. The method also returns the VMID associated with the
+ * queue.
+ *
+ * @adev: Handle of device whose registers are to be read
+ * @queue_idx: Index of queue in the queue-map bit-field
+ * @wave_cnt: Output parameter updated with number of waves in flight
+ * @vmid: Output parameter updated with VMID of queue whose wave count
+ * is being collected
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+ int *wave_cnt, int *vmid)
+{
+ int pipe_idx;
+ int queue_slot;
+ unsigned int reg_val;
+
+ /*
+ * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
+ * parameters to read out waves in flight. Get VMID if there are
+ * non-zero waves in flight.
+ */
+ *vmid = 0xFF;
+ *wave_cnt = 0;
+ pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+ queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+ reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+ queue_slot);
+ *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+ if (*wave_cnt != 0)
+ *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+ CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+}
+
+/**
+ * @kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
+ * shader engine and aggregates the number of waves that are in flight for the
+ * process whose pasid is provided as a parameter. The process could have ZERO
+ * or more queues running and submitting waves to compute units.
+ *
+ * @kgd: Handle of device from which to get number of waves in flight
+ * @pasid: Identifies the process for which this query call is invoked
+ * @wave_cnt: Output parameter updated with number of waves in flight that
+ * belong to process with given pasid
+ * @max_waves_per_cu: Output parameter updated with maximum number of waves
+ * possible per Compute Unit
+ *
+ * @note: It's possible that the device has too many queues (oversubscription)
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ * Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ * Time T2: passId = getPasId(vmid); vmid is associated with Pasid P2
+ * In the sequence above wave count obtained from time T1 will be incorrectly
+ * lost or added to total wave count.
+ *
+ * The registers that provide the waves in flight are:
+ *
+ * SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
+ * queue is slotted, OFF if there is no queue. A process could have ZERO or
+ * more queues slotted and submitting waves to be run on compute units. Even
+ * when there is a queue it is possible there could be zero wave fronts, this
+ * can happen when queue is waiting on top-of-pipe events - e.g. waitRegMem
+ * command
+ *
+ * For each bit that is ON from above:
+ *
+ * Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ * number of waves that are in flight for the queue at specified index. The
+ * index ranges from 0 to 7.
+ *
+ * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
+ * of the wave(s).
+ *
+ * Determine if VMID from above step maps to pasid provided as parameter. If
+ * it matches, aggregate the wave count. That the VMID will not match pasid is
+ * a normal condition, i.e. a device is expected to support multiple queues
+ * from multiple processes.
+ *
+ * Reading registers referenced above involves programming GRBM appropriately
+ */
+static void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+ int *pasid_wave_cnt, int *max_waves_per_cu)
+{
+ int qidx;
+ int vmid;
+ int se_idx;
+ int sh_idx;
+ int se_cnt;
+ int sh_cnt;
+ int wave_cnt;
+ int queue_map;
+ int pasid_tmp;
+ int max_queue_cnt;
+ int vmid_wave_cnt = 0;
+ struct amdgpu_device *adev;
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
+
+ adev = get_amdgpu_device(kgd);
+ lock_spi_csq_mutexes(adev);
+ soc15_grbm_select(adev, 1, 0, 0, 0);
+
+ /*
+ * Iterate through the shader engines and arrays of the device
+ * to get number of waves in flight
+ */
+ bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+ KGD_MAX_QUEUES);
+ max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ sh_cnt = adev->gfx.config.max_sh_per_se;
+ se_cnt = adev->gfx.config.max_shader_engines;
+ for (se_idx = 0; se_idx < se_cnt; se_idx++) {
+ for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+
+ gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+ queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
+ mmSPI_CSQ_WF_ACTIVE_STATUS));
+
+ /*
+ * Assumption: queue map encodes following schema: four
+ * pipes per each micro-engine, with each pipe mapping
+ * eight queues. This schema is true for GFX9 devices
+ * and must be verified for newer device families
+ */
+ for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+
+ /* Skip queues that are not associated with
+ * compute functions
+ */
+ if (!test_bit(qidx, cp_queue_bitmap))
+ continue;
+
+ if (!(queue_map & (1 << qidx)))
+ continue;
+
+ /* Get number of waves in flight and aggregate them */
+ get_wave_count(adev, qidx, &wave_cnt, &vmid);
+ if (wave_cnt != 0) {
+ pasid_tmp =
+ RREG32(SOC15_REG_OFFSET(OSSSYS, 0,
+ mmIH_VMID_0_LUT) + vmid);
+ if (pasid_tmp == pasid)
+ vmid_wave_cnt += wave_cnt;
+ }
+ }
+ }
+ }
+
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ unlock_spi_csq_mutexes(adev);
- gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ /* Update the output parameters and return */
+ *pasid_wave_cnt = vmid_wave_cnt;
+ *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+ adev->gfx.cu_info.max_waves_per_simd;
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
@@ -713,6 +897,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
- .get_unique_id = amdgpu_amdkfd_get_unique_id,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
};
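
To make the two output parameters of kgd_gfx_v9_get_cu_occupancy() concrete: a consumer such as KFD can convert pasid_wave_cnt and max_waves_per_cu into an occupancy percentage once it also knows the number of compute units. A back-of-the-envelope sketch with purely illustrative numbers (real values come from adev->gfx.cu_info):

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only, not from any particular ASIC. */
	int cu_count = 60;		/* active compute units on the device */
	int max_waves_per_cu = 40;	/* simd_per_cu * max_waves_per_simd   */
	int pasid_wave_cnt = 480;	/* waves in flight for this process   */

	/* Occupancy as a percentage of device-wide wave capacity. */
	int pct = pasid_wave_cnt * 100 / (cu_count * max_waves_per_cu);

	printf("CU occupancy: %d%%\n", pct);	/* 480 / 2400 -> 20% */
	return 0;
}
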
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index aedf67d57449..fc8934b86d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -26,7 +26,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases);
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid);
int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -60,3 +60,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
+
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t page_table_base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a58af513c952..5da487b64a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -148,8 +148,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
+ if (kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit)
+ pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+
if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit) ||
+ kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev->kfd.vram_used + vram_needed >
@@ -562,7 +566,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
mutex_lock(&process_info->lock);
- ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
+ ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
if (ret) {
pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
goto out;
@@ -992,7 +996,7 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef)
{
@@ -1028,7 +1032,7 @@ amdgpu_vm_init_fail:
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
- struct file *filp, unsigned int pasid,
+ struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef)
{
@@ -1668,7 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
return -EINVAL;
obj = dma_buf->priv;
- if (obj->dev->dev_private != adev)
+ if (drm_to_adev(obj->dev) != adev)
/* Can't handle buffers from other devices */
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 29f767e026e4..469352e2d6ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
- adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -541,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
}
}
- amdgpu_link_encoder_connector(adev->ddev);
+ amdgpu_link_encoder_connector(adev_to_drm(adev));
return true;
}
@@ -1786,9 +1786,9 @@ static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
- adev->fw_vram_usage.start_offset = (start_addr &
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->fw_vram_usage.size = size << 10;
+ adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
@@ -1882,7 +1882,7 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
WREG32(reg, val);
}
@@ -1898,7 +1898,7 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
uint32_t r;
r = RREG32(reg);
@@ -1916,7 +1916,7 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
*/
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
WREG32_IO(reg, val);
}
@@ -1932,7 +1932,7 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
uint32_t r;
r = RREG32_IO(reg);
@@ -1944,7 +1944,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
@@ -1995,7 +1995,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
return -ENOMEM;
adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
+ atom_card_info->dev = adev_to_drm(adev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 1279053324f9..b4df6460e45a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -89,9 +89,9 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
- adev->fw_vram_usage.start_offset = (start_addr &
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->fw_vram_usage.size = size << 10;
+ adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
@@ -543,6 +543,7 @@ int amdgpu_mem_train_support(struct amdgpu_device *adev)
case HW_REV(11, 0, 0):
case HW_REV(11, 0, 5):
case HW_REV(11, 0, 7):
+ case HW_REV(11, 0, 11):
ret = 1;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..7abe9500c0c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -616,7 +616,7 @@ static bool amdgpu_atpx_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
@@ -626,7 +626,7 @@ static bool amdgpu_atpx_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index b1172d93c99c..6333cada1e09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -417,26 +417,40 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
- if (amdgpu_atrm_get_bios(adev))
+ if (amdgpu_atrm_get_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
goto success;
+ }
- if (amdgpu_acpi_vfct_bios(adev))
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
goto success;
+ }
- if (igp_read_bios_from_vram(adev))
+ if (igp_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
+ }
- if (amdgpu_read_bios(adev))
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
+ }
- if (amdgpu_read_bios_from_rom(adev))
+ if (amdgpu_read_bios_from_rom(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM\n");
goto success;
+ }
- if (amdgpu_read_disabled_bios(adev))
+ if (amdgpu_read_disabled_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n");
goto success;
+ }
- if (amdgpu_read_platform_bios(adev))
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
goto success;
+ }
DRM_ERROR("Unable to locate a BIOS ROM\n");
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 4053597b3af2..15c45b2a3983 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -265,7 +265,7 @@ error_free:
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a1aec205435d..65d1b23d7e74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -26,6 +26,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -41,7 +42,7 @@
void amdgpu_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* bail if the connector does not have hpd pin, e.g.,
@@ -279,7 +280,7 @@ amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
static void amdgpu_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->edid)
@@ -463,7 +464,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
@@ -834,7 +835,7 @@ static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* XXX check mode bandwidth */
@@ -941,7 +942,7 @@ static bool
amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status status;
@@ -972,7 +973,7 @@ static enum drm_connector_status
amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
const struct drm_encoder_helper_funcs *encoder_funcs;
int r;
@@ -1159,7 +1160,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* XXX check mode bandwidth */
@@ -1311,7 +1312,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if ((adev->clock.default_dispclk >= 53900) &&
amdgpu_connector_encoder_is_hbr2(connector)) {
@@ -1325,7 +1326,7 @@ static enum drm_connector_status
amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
@@ -1413,6 +1414,10 @@ out:
pm_runtime_put_autosuspend(connector->dev->dev);
}
+ drm_dp_set_subconnector_property(&amdgpu_connector->base,
+ ret,
+ amdgpu_dig_connector->dpcd,
+ amdgpu_dig_connector->downstream_ports);
return ret;
}
@@ -1521,7 +1526,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
@@ -1959,6 +1964,11 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_dp_subconnector_property(&amdgpu_connector->base);
+ }
+
return;
failed:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a512ccbc4dea..12598a4b5c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -299,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
{
s64 time_us, increment_us;
u64 free_vram, total_vram, used_vram;
-
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
* throttling.
*
@@ -316,7 +316,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
- used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ used_vram = amdgpu_vram_mgr_usage(vram_man);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -363,7 +363,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_vis_usage(vram_man);
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
@@ -1275,13 +1275,24 @@ error_unlock:
return r;
}
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+{
+ int i;
+
+ if (!trace_amdgpu_cs_enabled())
+ return;
+
+ for (i = 0; i < parser->job->num_ibs; i++)
+ trace_amdgpu_cs(parser, i);
+}
+
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
- int i, r;
+ int r;
if (amdgpu_ras_intr_triggered())
return -EHWPOISON;
@@ -1294,7 +1305,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser %d!\n", r);
+ if (printk_ratelimit())
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
@@ -1319,8 +1331,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
reserved_buffers = true;
- for (i = 0; i < parser.job->num_ibs; i++)
- trace_amdgpu_cs(&parser, i);
+ trace_amdgpu_cs_ibs(&parser);
r = amdgpu_cs_vm_handling(&parser);
if (r)
@@ -1421,7 +1432,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_fence_to_handle *info = data;
struct dma_fence *fence;
struct drm_syncobj *syncobj;
@@ -1597,7 +1608,7 @@ err_free_fence_array:
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
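The new trace_amdgpu_cs_ibs() helper above works because TRACE_EVENT()-defined tracepoints also generate a trace_<name>_enabled() static-key test; hoisting that test in front of the loop makes the whole per-IB pass effectively free while tracing is off. A sketch of the pattern, where foo_evt stands in for any hypothetical tracepoint:

	/* foo_evt is a stand-in tracepoint, not a real one; the generated
	 * trace_foo_evt_enabled() compiles to a patched-out branch while
	 * the event is disabled. */
	static void example_trace_all(struct amdgpu_cs_parser *p)
	{
		int i;

		if (!trace_foo_evt_enabled())
			return;			/* skip the loop entirely */

		for (i = 0; i < p->job->num_ibs; i++)
			trace_foo_evt(p, i);
	}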
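The amdgpu_cs_get_threshold_for_moves() hunks also show the TTM rework in this series: per-domain managers are now reached through ttm_manager_type() once, up front, instead of indexing bdev.man[] at every use, as in this condensed restatement of the pattern:

	/* condensed from the hunks above; not new behavior */
	struct ttm_resource_manager *vram_man =
		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	u64 used_vram = amdgpu_vram_mgr_usage(vram_man);
	u64 used_vis_vram = amdgpu_vram_mgr_vis_usage(vram_man);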
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8842c55d4490..c80d8339f58c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
return -EINVAL;
/* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
switch (prio) {
- case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_HIGH:
case DRM_SCHED_PRIORITY_KERNEL:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
@@ -114,7 +114,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
- if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+ /* disable load balancing if the hw engine retains context among dependent jobs */
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+ hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+ hw_ip == AMDGPU_HW_IP_UVD_ENC ||
+ hw_ip == AMDGPU_HW_IP_UVD) {
sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
@@ -385,16 +389,15 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- r = 0;
id = args->in.ctx_id;
- priority = amdgpu_to_sched_priority(args->in.priority);
+ r = amdgpu_to_sched_priority(args->in.priority, &priority);
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == DRM_SCHED_PRIORITY_INVALID)
+ if (r == -EINVAL)
priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
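The amdgpu_to_sched_priority() call above switches from returning a sentinel (DRM_SCHED_PRIORITY_INVALID) to the usual kernel out-parameter idiom: the return value carries the error, which lets this ioctl keep tolerating garbage priorities for backwards compatibility while other callers can treat -EINVAL as fatal. A reduced sketch of that shape; the real helper covers the full priority range:

	#include <drm/amdgpu_drm.h>
	#include <drm/gpu_scheduler.h>

	/* illustrative body only */
	static int example_to_sched_priority(int32_t in,
					     enum drm_sched_priority *prio)
	{
		switch (in) {
		case AMDGPU_CTX_PRIORITY_NORMAL:
			*prio = DRM_SCHED_PRIORITY_NORMAL;
			return 0;
		case AMDGPU_CTX_PRIORITY_HIGH:
			*prio = DRM_SCHED_PRIORITY_HIGH;
			return 0;
		default:
			return -EINVAL;	/* caller decides if garbage is fatal */
		}
	}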
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 193ffdb957b6..2d125b8b15ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -34,6 +34,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
+#include "amdgpu_rap.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -68,8 +69,8 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
+ adev_to_drm(adev)->primary->debugfs_root,
+ adev_to_drm(adev)->primary);
#endif
return 0;
}
@@ -100,14 +101,18 @@ static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
file->private_data = adev;
- mutex_lock(&adev->lock_reset);
+ ret = down_read_killable(&adev->reset_sem);
+ if (ret)
+ return ret;
+
if (adev->autodump.dumping.done) {
reinit_completion(&adev->autodump.dumping);
ret = 0;
} else {
ret = -EBUSY;
}
- mutex_unlock(&adev->lock_reset);
+
+ up_read(&adev->reset_sem);
return ret;
}
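The mutex-to-rwsem conversion here lets many readers (debugfs files, ioctls) proceed concurrently while a GPU reset takes adev->reset_sem for write, and down_read_killable() additionally lets a fatal signal abort the wait instead of leaving the task wedged behind a stuck reset. The recurring caller shape, sketched with hypothetical work in the middle:

	static int example_read_locked(struct amdgpu_device *adev)
	{
		int ret;

		ret = down_read_killable(&adev->reset_sem);
		if (ret)
			return ret;	/* -EINTR: fatal signal while waiting */

		/* ... touch state a concurrent GPU reset would tear down ... */

		up_read(&adev->reset_sem);
		return 0;
	}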
@@ -126,7 +131,7 @@ static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_
poll_wait(file, &adev->autodump.gpu_hang, poll_table);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return POLLIN | POLLRDNORM | POLLWRNORM;
return 0;
@@ -146,7 +151,7 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
init_waitqueue_head(&adev->autodump.gpu_hang);
debugfs_create_file("amdgpu_autodump", 0600,
- adev->ddev->primary->debugfs_root,
+ adev_to_drm(adev)->primary->debugfs_root,
adev, &autodump_debug_fops);
}
@@ -222,23 +227,23 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
*pos &= (1UL << 22) - 1;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
}
@@ -262,7 +267,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
}
if (r) {
result = r;
@@ -287,8 +292,8 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -335,15 +340,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -353,8 +358,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -365,8 +370,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -394,15 +399,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -411,8 +416,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -425,8 +430,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -454,15 +459,15 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -472,8 +477,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -484,8 +489,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -513,15 +518,15 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -530,8 +535,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -544,8 +549,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -573,15 +578,15 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -591,8 +596,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -603,8 +608,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -632,15 +637,15 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -649,8 +654,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -663,8 +668,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -791,22 +796,22 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
amdgpu_virt_disable_access_debugfs(adev);
@@ -873,15 +878,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -896,8 +901,8 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
amdgpu_virt_disable_access_debugfs(adev);
@@ -971,7 +976,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (!data)
return -ENOMEM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
goto err;
@@ -994,8 +999,8 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
uint32_t value;
@@ -1017,7 +1022,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return result;
err:
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
kfree(data);
return r;
}
@@ -1042,9 +1047,9 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1053,8 +1058,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1066,8 +1071,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return result;
}
@@ -1091,7 +1096,7 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
return r;
@@ -1100,15 +1105,15 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = amdgpu_get_gfx_off_status(adev, &value);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1118,8 +1123,8 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return result;
}
@@ -1211,7 +1216,7 @@ static const char *debugfs_regs_names[] = {
*/
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
unsigned int i;
@@ -1231,17 +1236,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0, i;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
/* Avoid accidentally unparking the sched thread during GPU reset */
- mutex_lock(&adev->lock_reset);
+ r = down_read_killable(&adev->reset_sem);
+ if (r)
+ return r;
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -1268,7 +1275,7 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
- mutex_unlock(&adev->lock_reset);
+ up_read(&adev->reset_sem);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1280,7 +1287,7 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
seq_write(m, adev->bios, adev->bios_size);
return 0;
@@ -1290,12 +1297,12 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1311,12 +1318,12 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1458,7 +1465,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
return -ENOMEM;
/* Avoid accidentally unparking the sched thread during GPU reset */
- mutex_lock(&adev->lock_reset);
+ r = down_read_killable(&adev->reset_sem);
+ if (r)
+ goto pro_end;
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1499,13 +1508,14 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
- mutex_unlock(&adev->lock_reset);
+ up_read(&adev->reset_sem);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+pro_end:
kfree(fences);
- return 0;
+ return r;
}
static int amdgpu_debugfs_sclk_set(void *data, u64 val)
@@ -1517,9 +1527,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
- ret = pm_runtime_get_sync(adev->ddev->dev);
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
@@ -1532,8 +1542,8 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
return 0;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
@@ -1553,7 +1563,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root, adev,
+ adev_to_drm(adev)->primary->debugfs_root, adev,
&fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
@@ -1562,7 +1572,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
adev->smu.debugfs_sclk =
debugfs_create_file("amdgpu_force_sclk", 0200,
- adev->ddev->primary->debugfs_root, adev,
+ adev_to_drm(adev)->primary->debugfs_root, adev,
&fops_sclk_set);
if (!(adev->smu.debugfs_sclk)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
@@ -1623,6 +1633,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_autodump_init(adev);
+ amdgpu_rap_debugfs_init(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
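Every accessor in this file brackets its register traffic with the same runtime-PM sequence, and the error paths above all still call pm_runtime_put_autosuspend() because pm_runtime_get_sync() increments the usage count even when it fails. A condensed sketch of the bracket:

	static int example_debugfs_access(struct amdgpu_device *adev)
	{
		struct device *dev = adev_to_drm(adev)->dev;
		int r;

		r = pm_runtime_get_sync(dev);
		if (r < 0) {
			/* get_sync bumped the count even on failure */
			pm_runtime_put_autosuspend(dev);
			return r;
		}

		/* ... access registers while the device is awake ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}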
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index eb7cfe87042e..e3783f5a459d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -80,8 +80,7 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -132,7 +131,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
@@ -157,7 +156,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}
@@ -179,7 +178,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}
@@ -201,7 +200,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}
@@ -219,7 +218,7 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
*/
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (adev->flags & AMD_IS_PX)
return true;
@@ -236,14 +235,16 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
*/
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_asic_supports_baco(adev);
}
+/*
+ * VRAM access helper functions
+ */
+
/**
- * VRAM access helper functions.
- *
* amdgpu_device_vram_access - read/write a buffer in vram
*
* @adev: amdgpu_device pointer
@@ -303,10 +304,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
/*
- * MMIO register access helper functions.
+ * Register access helper functions.
*/
/**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -314,25 +315,29 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*
* Returns the 32 bit value from the offset specified.
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags)
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_kiq_rreg(adev, reg);
-
- if ((reg * 4) < adev->rmmio_size)
- ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ if (adev->in_pci_err_recovery)
+ return 0;
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_sem)) {
+ ret = amdgpu_kiq_rreg(adev, reg);
+ up_read(&adev->reset_sem);
+ } else {
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ ret = adev->pcie_rreg(adev, reg * 4);
}
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+
return ret;
}
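After this rewrite amdgpu_device_rreg() picks one of three paths: a KIQ-mediated read when running as an SR-IOV guest at runtime (guarded by a reset_sem trylock so a reset in flight falls back to plain MMIO), a direct readl() for offsets inside the BAR, and the PCIe index/data pair for everything beyond it. Callers select between the first two with acc_flags, in the style of the driver's RREG32 macro family; written out here purely for illustration:

	/* illustrative macros; the real RREG32/RREG32_NO_KIQ differ in detail */
	#define EX_RREG32(adev, reg) \
		amdgpu_device_rreg((adev), (reg), 0)	/* may route through KIQ */
	#define EX_RREG32_NO_KIQ(adev, reg) \
		amdgpu_device_rreg((adev), (reg), AMDGPU_REGS_NO_KIQ)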
@@ -350,7 +355,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
*
* Returns the 8 bit value from the offset specified.
*/
-uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (offset < adev->rmmio_size)
return (readb(adev->rmmio + offset));
BUG();
@@ -371,31 +380,19 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
+{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (offset < adev->rmmio_size)
writeb(value, adev->rmmio + offset);
else
BUG();
}
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
-{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
- if ((reg * 4) < adev->rmmio_size)
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
-}
-
/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -404,13 +401,27 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
{
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_kiq_wreg(adev, reg, v);
+ if (adev->in_pci_err_recovery)
+ return;
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_sem)) {
+ amdgpu_kiq_wreg(adev, reg, v);
+ up_read(&adev->reset_sem);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ adev->pcie_wreg(adev, reg * 4, v);
+ }
+
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
/*
@@ -418,18 +429,20 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
*
* this function is invoked only for debugfs register access
*/
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v)
{
- if (amdgpu_sriov_fullaccess(adev) &&
- adev->gfx.rlc.funcs &&
- adev->gfx.rlc.funcs->is_rlcg_access_range) {
+ if (adev->in_pci_err_recovery)
+ return;
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
-
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
/**
@@ -442,6 +455,9 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
*/
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if ((reg * 4) < adev->rio_mem_size)
return ioread32(adev->rio_mem + (reg * 4));
else {
@@ -461,6 +477,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
*/
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
else {
@@ -480,6 +499,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (index < adev->doorbell.num_doorbells) {
return readl(adev->doorbell.ptr + index);
} else {
@@ -500,6 +522,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (index < adev->doorbell.num_doorbells) {
writel(v, adev->doorbell.ptr + index);
} else {
@@ -518,6 +543,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (index < adev->doorbell.num_doorbells) {
return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
} else {
@@ -538,6 +566,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (index < adev->doorbell.num_doorbells) {
atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
} else {
@@ -546,9 +577,138 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
}
/**
+ * amdgpu_device_indirect_rreg - read an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr)
+{
+ unsigned long flags;
+ u32 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr)
+{
+ unsigned long flags;
+ u64 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* read low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ /* read high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ r |= ((u64)readl(pcie_data_offset) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_wreg - write an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u32 reg_data)
+{
+ unsigned long flags;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel(reg_data, pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u64 reg_data)
+{
+ unsigned long flags;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* write low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+ readl(pcie_data_offset);
+ /* write high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data >> 32), pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
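These four helpers centralize the classic index/data access pattern: write the target address to the index register, issue a posting readl() to flush the write before touching the data register, all under pcie_idx_lock. An ASIC file would then wire its adev->pcie_rreg callback to them; an illustrative wiring, assuming nbio callbacks of this era expose the pair's offsets:

	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
	{
		u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
		u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

		return amdgpu_device_indirect_rreg(adev, pcie_index,
						   pcie_data, reg);
	}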
+/**
* amdgpu_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -565,7 +725,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -582,7 +742,7 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
/**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -599,7 +759,7 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg64 - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -616,7 +776,7 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint
/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
*
@@ -636,7 +796,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
/**
* amdgpu_block_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
* @v: value to write to the register
@@ -654,9 +814,23 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_asic_init - Wrapper for atom asic_init
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Does any ASIC-specific work and then calls the atom asic_init.
+ */
+static int amdgpu_device_asic_init(struct amdgpu_device *adev)
+{
+ amdgpu_asic_pre_asic_init(adev);
+
+ return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+}
+
+/**
* amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
@@ -673,7 +847,7 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
/**
* amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
@@ -1199,6 +1373,15 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_gmc_tmz_set(adev);
+ if (amdgpu_num_kcq == -1) {
+ amdgpu_num_kcq = 8;
+ } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
+ amdgpu_num_kcq = 8;
+ dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
+ }
+
+ amdgpu_gmc_noretry_set(adev);
+
return 0;
}
@@ -1211,7 +1394,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
* Callback for the switcheroo driver. Suspends or resumes
* the asics before or after they are powered up using ACPI methods.
*/
-static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
@@ -1225,7 +1409,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
+ amdgpu_device_load_pci_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
@@ -1238,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
- pci_save_state(dev->pdev);
+ amdgpu_device_cache_pci_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3cold);
@@ -1504,7 +1688,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
const char *pci_address_name = pci_name(ddev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
@@ -1563,7 +1747,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->discovery_bin) {
+ if (adev->mman.discovery_bin) {
amdgpu_discovery_get_gfx_info(adev);
/*
@@ -1600,6 +1784,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_VEGA20:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
default:
return 0;
case CHIP_VEGA10:
@@ -1620,7 +1806,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
case CHIP_NAVI10:
chip_name = "navi10";
@@ -1631,12 +1820,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_NAVI12:
chip_name = "navi12";
break;
- case CHIP_SIENNA_CICHLID:
- chip_name = "sienna_cichlid";
- break;
- case CHIP_NAVY_FLOUNDER:
- chip_name = "navy_flounder";
- break;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -1935,7 +2118,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].status.hw == true)
break;
- if (adev->in_gpu_reset || adev->in_suspend) {
+ if (amdgpu_in_reset(adev) || adev->in_suspend) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n",
@@ -2055,13 +2238,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
* it should be called after amdgpu_device_ip_hw_init_phase2 since
* for some ASICs the RAS EEPROM code relies on SMU fully functioning
* for I2C communication, which is only true at this point.
- * recovery_init may fail, but it can free all resources allocated by
- * itself and its failure should not stop amdgpu init process.
+ *
+ * amdgpu_ras_recovery_init may fail, but the upper layers only care
+ * about failures caused by a bad GPU state and stop the amdgpu init
+ * process accordingly. For other failures, it still releases all
+ * resources and prints an error message instead of returning a
+ * negative value to the caller.
*
* Note: theoretically, this should be called before all vram allocations
* to protect retired page from abusing
*/
- amdgpu_ras_recovery_init(adev);
+ r = amdgpu_ras_recovery_init(adev);
+ if (r)
+ goto init_failed;
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
@@ -2106,7 +2295,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
AMDGPU_RESET_MAGIC_NUM))
return true;
- if (!adev->in_gpu_reset)
+ if (!amdgpu_in_reset(adev))
return false;
/*
@@ -2217,9 +2406,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
if (!(adev->flags & AMD_IS_APU) &&
- !gpu_ins->mgpu_fan_enabled &&
- adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+ !gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
break;
@@ -2574,17 +2761,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
- for (i = 0; i < adev->num_ip_blocks; i++)
- adev->ip_blocks[i].status.hw = false;
-
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
int j;
struct amdgpu_ip_block *block;
- for (j = 0; j < adev->num_ip_blocks; j++) {
- block = &adev->ip_blocks[j];
+ block = &adev->ip_blocks[i];
+ block->status.hw = false;
- if (block->version->type != ip_order[i] ||
+ for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
+
+ if (block->version->type != ip_order[j] ||
!block->status.valid)
continue;
@@ -2777,6 +2963,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
@@ -2825,13 +3017,13 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
/**
* amdgpu_device_has_dc_support - check if dc is supported
*
- * @adev: amdgpu_device_pointer
+ * @adev: amdgpu_device pointer
*
* Returns true for supported, false for not supported
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -2842,7 +3034,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
/* It's a bug to not have a hive within this function */
if (WARN_ON(!hive))
@@ -2857,13 +3049,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
if (adev->asic_reset_res)
goto fail;
@@ -2879,7 +3071,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
- adev->asic_reset_res, adev->ddev->unique);
+ adev->asic_reset_res, adev_to_drm(adev)->unique);
+ amdgpu_put_xgmi_hive(hive);
}
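The BACO branch above is a lockstep dance: every device in the XGMI hive must enter BACO before any of them leaves it, which is what the two task-barrier calls enforce. Reduced to its skeleton, with the flow as in the patch and error handling trimmed:

	task_barrier_enter(&hive->tb);	/* wait until all hive members arrive */
	adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
	if (adev->asic_reset_res)
		goto fail;		/* label as in the patch */

	task_barrier_exit(&hive->tb);	/* everyone is in BACO; leave together */
	adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));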
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
@@ -2958,12 +3151,11 @@ static const struct attribute *amdgpu_dev_attributes[] = {
NULL
};
+
/**
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @ddev: drm dev pointer
- * @pdev: pci dev pointer
* @flags: driver flags
*
* Initializes the driver info and hw (all asics).
@@ -2971,18 +3163,15 @@ static const struct attribute *amdgpu_dev_attributes[] = {
* Called at driver startup.
*/
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags)
{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct pci_dev *pdev = adev->pdev;
int r, i;
bool boco = false;
u32 max_MBps;
adev->shutdown = false;
- adev->dev = &pdev->dev;
- adev->ddev = ddev;
- adev->pdev = pdev;
adev->flags = flags;
if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
@@ -3038,7 +3227,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
- mutex_init(&adev->lock_reset);
+ atomic_set(&adev->in_gpu_reset, 0);
+ init_rwsem(&adev->reset_sem);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
@@ -3133,13 +3323,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return r;
+ goto failed_unmap;
}
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- return r;
+ goto failed_unmap;
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3180,6 +3370,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
+ pci_enable_pcie_error_reporting(adev->ddev.pdev);
+
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -3188,7 +3380,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
DRM_INFO("GPU posting now...\n");
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
goto failed;
@@ -3226,7 +3418,7 @@ fence_driver_init:
}
/* init the mode config */
- drm_mode_config_init(adev->ddev);
+ drm_mode_config_init(adev_to_drm(adev));
r = amdgpu_device_ip_init(adev);
if (r) {
@@ -3322,16 +3514,18 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
- if (r) {
+ if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
- return r;
- }
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
if (r)
dev_err(adev->dev, "amdgpu_pmu_init failed\n");
+ /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(pdev);
+
return 0;
failed:
@@ -3339,6 +3533,10 @@ failed:
if (boco)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+failed_unmap:
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+
return r;
}
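For reference: this signature change works because the series embeds the drm_device inside amdgpu_device (note the adev->ddev.pdev access above), so ddev and pdev can be derived from adev instead of being passed in. A minimal sketch of the conversion helpers, assuming that embedded-field layout (the real definitions live in amdgpu.h):

	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return &adev->ddev;	/* ddev is now embedded, not a pointer */
	}

	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return container_of(ddev, struct amdgpu_device, ddev);
	}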
@@ -3352,31 +3550,33 @@ failed:
*/
void amdgpu_device_fini(struct amdgpu_device *adev)
{
- int r;
-
- DRM_INFO("amdgpu: finishing device.\n");
+ dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+ kfree(adev->pci_state);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_request_full_gpu(adev, false);
+ amdgpu_virt_fini_data_exchange(adev);
+ }
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
- drm_helper_force_disable_all(adev->ddev);
+ drm_helper_force_disable_all(adev_to_drm(adev));
else
- drm_atomic_helper_shutdown(adev->ddev);
+ drm_atomic_helper_shutdown(adev_to_drm(adev));
}
amdgpu_fence_driver_fini(adev);
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_device_ip_fini(adev);
+ amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3394,7 +3594,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_boco(adev->ddev))
+ if (amdgpu_device_supports_boco(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
@@ -3410,7 +3610,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->discovery_bin)
+ if (adev->mman.discovery_bin)
amdgpu_discovery_fini(adev);
}
@@ -3436,11 +3636,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
struct drm_connector_list_iter iter;
int r;
- if (dev == NULL || dev->dev_private == NULL) {
- return -ENODEV;
- }
-
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -3528,7 +3724,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_crtc *crtc;
int r = 0;
@@ -3537,14 +3733,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
/* post card */
if (amdgpu_device_need_post(adev)) {
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ r = amdgpu_device_asic_init(adev);
if (r)
- DRM_ERROR("amdgpu asic init failed\n");
+ dev_err(adev->dev, "amdgpu asic init failed\n");
}
r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
+ dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
}
amdgpu_fence_driver_resume(adev);
@@ -3568,7 +3764,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
@@ -3658,7 +3854,7 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hang =
adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
+ dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -3719,7 +3915,7 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("Some block need full reset!\n");
+ dev_info(adev->dev, "Some block need full reset!\n");
return true;
}
}
@@ -3807,7 +4003,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
else
tmo = msecs_to_jiffies(100);
- DRM_INFO("recover vram bo from shadow start\n");
+ dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
@@ -3843,11 +4039,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dma_fence_put(fence);
if (r < 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
+ dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
- DRM_INFO("recover vram bo from shadow done\n");
+ dev_info(adev->dev, "recover vram bo from shadow done\n");
return 0;
}
@@ -3855,7 +4051,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize the ASIC
@@ -3882,7 +4078,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_virt_init_data_exchange(adev);
/* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+ amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
r = amdgpu_device_fw_loading(adev);
if (r)
@@ -3908,9 +4104,37 @@ error:
}
/**
+ * amdgpu_device_has_job_running - check if there is any job in the mirror list
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check whether any scheduler ring still has a job pending in its mirror list.
+ */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
+{
+ int i;
+ struct drm_sched_job *job;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ spin_lock(&ring->sched.job_list_lock);
+ job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
+ struct drm_sched_job, node);
+ spin_unlock(&ring->sched.job_list_lock);
+ if (job)
+ return true;
+ }
+ return false;
+}
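A hedged usage sketch for the new helper; the caller below is hypothetical, illustrating that a reset can be skipped when no job is in flight:

	/* Hypothetical caller: only bother resetting when work is queued. */
	static bool hypothetical_need_reset(struct amdgpu_device *adev)
	{
		if (!amdgpu_device_has_job_running(adev))
			return false;	/* nothing in flight */
		return amdgpu_device_should_recover_gpu(adev);
	}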
+
+/**
* amdgpu_device_should_recover_gpu - check if we should try GPU recovery
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
* a hung GPU.
@@ -3918,7 +4142,7 @@ error:
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
if (!amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("Timeout, but no hardware hang detected.\n");
+ dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
return false;
}
@@ -3958,7 +4182,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return true;
disabled:
- DRM_INFO("GPU recovery disabled.\n");
+ dev_info(adev->dev, "GPU recovery disabled.\n");
return false;
}
@@ -3972,6 +4196,11 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
amdgpu_debugfs_wait_dump(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ }
+
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3997,7 +4226,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
need_full_reset = true;
}
}
@@ -4013,7 +4242,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
struct list_head *device_list_handle,
- bool *need_full_reset_arg)
+ bool *need_full_reset_arg,
+ bool skip_hw_reset)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset = *need_full_reset_arg, vram_lost = false;
@@ -4023,7 +4253,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
*/
- if (need_full_reset) {
+ if (!skip_hw_reset && need_full_reset) {
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -4033,8 +4263,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
r = amdgpu_asic_reset(tmp_adev);
if (r) {
- DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
- r, tmp_adev->ddev->unique);
+ dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
break;
}
}
@@ -4066,8 +4296,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
/* post card */
- if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
- DRM_WARN("asic atom init failed!");
+ if (amdgpu_device_asic_init(tmp_adev))
+ dev_warn(tmp_adev->dev, "asic atom init failed!");
if (!r) {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
@@ -4081,8 +4311,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
amdgpu_inc_vram_lost(tmp_adev);
}
- r = amdgpu_gtt_mgr_recover(
- &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
if (r)
goto out;
@@ -4109,8 +4338,23 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
amdgpu_fbdev_set_suspend(tmp_adev, 0);
- /* must succeed. */
- amdgpu_ras_resume(tmp_adev);
+ /*
+ * The GPU enters a bad state once the number of faulty
+ * pages retired by ECC reaches the threshold, and RAS
+ * recovery is scheduled next. So check here whether that
+ * bad page threshold has indeed been exceeded; if so,
+ * abort the recovery and remind the user to either retire
+ * this GPU or set a bigger bad_page_threshold value before
+ * probing the driver again.
+ */
+ if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
+ /* must succeed. */
+ amdgpu_ras_resume(tmp_adev);
+ } else {
+ r = -EINVAL;
+ goto out;
+ }
/* Update PSP FW topology after reset */
if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
@@ -4118,7 +4362,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
-
out:
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
@@ -4143,16 +4386,19 @@ end:
return r;
}
-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
{
- if (trylock) {
- if (!mutex_trylock(&adev->lock_reset))
- return false;
- } else
- mutex_lock(&adev->lock_reset);
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+ return false;
+
+ if (hive) {
+ down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
+ } else {
+ down_write(&adev->reset_sem);
+ }
atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = true;
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -4172,8 +4418,8 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_write(&adev->reset_sem);
}
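The mutex is replaced by an atomic flag plus an rwsem so that entering reset is a non-blocking try (the cmpxchg), while other paths can exclude a concurrent reset by taking reset_sem shared. A sketch of the presumed reader-side guard; the accessor below is hypothetical:

	/* Hypothetical reader: fail fast instead of racing an in-flight reset. */
	static int hypothetical_guarded_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
	{
		if (!down_read_trylock(&adev->reset_sem))
			return -EBUSY;		/* reset in progress */
		WREG32(reg, v);			/* writer (reset path) is excluded */
		up_read(&adev->reset_sem);
		return 0;
	}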
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
@@ -4237,7 +4483,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @job: which job trigger hang
*
* Attempt to reset the GPU if it has hung (all asics).
@@ -4257,7 +4503,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
- /**
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
@@ -4283,12 +4529,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
- hive = amdgpu_get_xgmi_hive(adev, true);
- if (hive && !mutex_trylock(&hive->reset_lock)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
- job ? job->base.id : -1, hive->hive_id);
- mutex_unlock(&hive->hive_lock);
- return 0;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
+ job ? job->base.id : -1, hive->hive_id);
+ amdgpu_put_xgmi_hive(hive);
+ return 0;
+ }
+ mutex_lock(&hive->hive_lock);
}
/*
@@ -4310,11 +4559,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
+ dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
job ? job->base.id : -1);
- mutex_unlock(&hive->hive_lock);
- return 0;
+ r = 0;
+ goto skip_recovery;
}
/*
@@ -4382,12 +4631,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
r = amdgpu_device_pre_asic_reset(tmp_adev,
- NULL,
+ (tmp_adev == adev) ? job : NULL,
&need_full_reset);
/* TODO: should we stop? */
if (r) {
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, tmp_adev->ddev->unique);
+ dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev_to_drm(tmp_adev)->unique);
tmp_adev->asic_reset_res = r;
}
}
@@ -4399,7 +4648,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
if (r && r == -EAGAIN)
goto retry;
}
@@ -4423,7 +4672,7 @@ skip_hw_reset:
}
if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
- drm_helper_resume_force_mode(tmp_adev->ddev);
+ drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
tmp_adev->asic_reset_res = 0;
@@ -4447,9 +4696,11 @@ skip_sched_resume:
amdgpu_device_unlock_adev(tmp_adev);
}
+skip_recovery:
if (hive) {
- mutex_unlock(&hive->reset_lock);
+ atomic_set(&hive->in_reset, 0);
mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
if (r)
@@ -4595,10 +4846,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
int amdgpu_device_baco_enter(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(adev->ddev))
+ if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
if (ras && ras->supported)
@@ -4609,11 +4860,11 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
int amdgpu_device_baco_exit(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(adev->ddev))
+ if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -4625,3 +4876,235 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
return 0;
}
+
+static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ cancel_delayed_work_sync(&ring->sched.work_tdr);
+ }
+}
+
+/**
+ * amdgpu_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
+ */
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ DRM_WARN("No support for XGMI hive yet...");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ /* Fatal error, prepare for slot reset */
+ case pci_channel_io_frozen:
+ /*
+ * Cancel and wait for all TDRs in progress if failing to
+ * set adev->in_gpu_reset in amdgpu_device_lock_adev
+ *
+ * Locking adev->reset_sem will prevent any external access
+ * to GPU during PCI error recovery
+ */
+ while (!amdgpu_device_lock_adev(adev, NULL))
+ amdgpu_cancel_all_tdr(adev);
+
+ /*
+ * Block any work scheduling as we do for regular GPU reset
+ * for the duration of the recovery
+ */
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ drm_sched_stop(&ring->sched, NULL);
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
+{
+
+ DRM_INFO("PCI error: mmio enabled callback!!\n");
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* This is called only if amdgpu_pci_error_detected returns
+ * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+ * works, no need to reset slot.
+ */
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r, i;
+ bool need_full_reset = true;
+ u32 memsize;
+ struct list_head device_list;
+
+ DRM_INFO("PCI error: slot reset callback!!\n");
+
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+
+ /* wait for asic to come out of reset */
+ msleep(500);
+
+ /* Restore PCI config space */
+ amdgpu_device_load_pci_state(pdev);
+
+ /* confirm ASIC came out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ memsize = amdgpu_asic_get_config_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+ if (memsize == 0xffffffff) {
+ r = -ETIME;
+ goto out;
+ }
+
+ adev->in_pci_err_recovery = true;
+ r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ adev->in_pci_err_recovery = false;
+ if (r)
+ goto out;
+
+ r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+
+out:
+ if (!r) {
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(adev->pdev);
+
+ DRM_INFO("PCIe error recovery succeeded\n");
+ } else {
+ DRM_ERROR("PCIe error recovery failed, err:%d", r);
+ amdgpu_device_unlock_adev(adev);
+ }
+
+ return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's
+ * OK to resume normal operation: restart the job schedulers
+ * that were stopped during error detection.
+ */
+void amdgpu_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ DRM_INFO("PCI error: resume callback!!\n");
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
+ amdgpu_device_unlock_adev(adev);
+}
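These four callbacks implement the standard PCI AER recovery sequence (error_detected, then mmio_enabled or slot_reset, then resume). They are expected to be wired into the driver's pci_driver through a pci_error_handlers table; a sketch, assuming the table lives in amdgpu_drv.c:

	static const struct pci_error_handlers amdgpu_pci_err_handler = {
		.error_detected	= amdgpu_pci_error_detected,
		.mmio_enabled	= amdgpu_pci_mmio_enabled,
		.slot_reset	= amdgpu_pci_slot_reset,
		.resume		= amdgpu_pci_resume,
	};

	/* referenced from the pci_driver: .err_handler = &amdgpu_pci_err_handler */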
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ r = pci_save_state(pdev);
+ if (!r) {
+ kfree(adev->pci_state);
+
+ adev->pci_state = pci_store_saved_state(pdev);
+
+ if (!adev->pci_state) {
+ DRM_ERROR("Failed to store PCI saved state");
+ return false;
+ }
+ } else {
+ DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ return false;
+ }
+
+ return true;
+}
+
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ if (!adev->pci_state)
+ return false;
+
+ r = pci_load_saved_state(pdev, adev->pci_state);
+
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ return false;
+ }
+
+ return true;
+}
+
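Taken together, the two helpers pair the kernel's save/restore primitives: pci_save_state() snapshots config space into the pci_dev, pci_store_saved_state() copies that snapshot into a driver-owned buffer, and pci_load_saved_state() feeds it back so pci_restore_state() can write it to the hardware after a surprise reset. A sketch of the intended pairing, matching the call sites above:

	/* At init (and after any known-good reconfiguration): */
	if (amdgpu_device_cache_pci_state(adev->pdev))
		pci_restore_state(adev->pdev);

	/* In slot_reset, once the link is back up: */
	amdgpu_device_load_pci_state(adev->pdev);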
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 61a26c15c8dd..373cdebe0e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -44,9 +44,9 @@ struct amdgpu_df_funcs {
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
- int is_enable);
+ int is_add);
int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
- int is_disable);
+ int is_remove);
void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
uint64_t *count);
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
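The is_enable/is_disable to is_add/is_remove rename lines these hooks up with the perf PMU lifecycle, where adding a counter and starting it are distinct steps. A sketch of the presumed mapping; the helper and the meaning of the flag are assumptions based only on the new names:

	/* Assumed mapping onto perf's pmu callbacks (hypothetical helper). */
	static void hypothetical_df_counter_cycle(struct amdgpu_device *adev,
						  uint64_t config)
	{
		adev->df.funcs->pmc_start(adev, config, 1);	/* pmu->add: claim + program */
		adev->df.funcs->pmc_start(adev, config, 0);	/* pmu->start: unhalt only */
		adev->df.funcs->pmc_stop(adev, config, 0);	/* pmu->stop: halt, keep */
		adev->df.funcs->pmc_stop(adev, config, 1);	/* pmu->del: halt + release */
	}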
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a50ff2306504..bfb95143ba5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->discovery_tmr_size, false);
+ adev->mman.discovery_tmr_size, false);
return 0;
}
@@ -168,18 +168,18 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
- if (!adev->discovery_bin)
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
+ r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@@ -202,7 +202,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@@ -220,9 +220,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
+ ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@@ -232,16 +232,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->discovery_bin);
- adev->discovery_bin = NULL;
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
- kfree(adev->discovery_bin);
- adev->discovery_bin = NULL;
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -265,8 +265,8 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return r;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@@ -274,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -288,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery_bin + ip_offset);
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -337,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
- if (!adev->discovery_bin) {
+ if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery_bin + ip_offset);
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@@ -377,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
- if (!adev->discovery_bin) {
+ if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d76172965199..7cc7af2a6822 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -93,7 +93,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
- (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
@@ -152,7 +152,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
@@ -292,12 +292,12 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
pm_runtime_mark_last_busy(dev->dev);
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
take the current one */
if (active && !adev->have_disp_power_ref) {
adev->have_disp_power_ref = true;
- goto out;
+ return ret;
}
/* if we have no active crtcs, then drop the power ref
we got before */
@@ -619,51 +619,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
int sz;
adev->mode_info.coherent_mode_property =
- drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
if (!adev->mode_info.coherent_mode_property)
return -ENOMEM;
adev->mode_info.load_detect_property =
- drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
if (!adev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(adev->ddev);
+ drm_mode_create_scaling_mode_property(adev_to_drm(adev));
sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
adev->mode_info.underscan_property =
- drm_property_create_enum(adev->ddev, 0,
- "underscan",
- amdgpu_underscan_enum_list, sz);
+ drm_property_create_enum(adev_to_drm(adev), 0,
+ "underscan",
+ amdgpu_underscan_enum_list, sz);
adev->mode_info.underscan_hborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan hborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan hborder", 0, 128);
if (!adev->mode_info.underscan_hborder_property)
return -ENOMEM;
adev->mode_info.underscan_vborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan vborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan vborder", 0, 128);
if (!adev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(amdgpu_audio_enum_list);
adev->mode_info.audio_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"audio",
amdgpu_audio_enum_list, sz);
sz = ARRAY_SIZE(amdgpu_dither_enum_list);
adev->mode_info.dither_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"dither",
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
adev->mode_info.abm_level_property =
- drm_property_create_range(adev->ddev, 0,
- "abm level", 0, 4);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "abm level", 0, 4);
if (!adev->mode_info.abm_level_property)
return -ENOMEM;
}
@@ -813,7 +813,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 519ce4427fce..957934926b24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -35,6 +35,7 @@
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
+#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
@@ -302,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT:
- sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->tbo.ttm->pages,
bo->tbo.num_pages);
if (IS_ERR(sgt))
return sgt;
@@ -454,7 +456,7 @@ static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_resv *resv = dma_buf->resv;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
@@ -595,3 +597,36 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
obj->import_attach = attach;
return obj;
}
+
+/**
+ * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
+ *
+ * @adev: amdgpu_device pointer of the importer
+ * @bo: amdgpu buffer object
+ *
+ * Returns:
+ * True if dmabuf accessible over xgmi, false otherwise.
+ */
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct drm_gem_object *obj = &bo->tbo.base;
+ struct drm_gem_object *gobj;
+
+ if (obj->import_attach) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* No XGMI with non AMD GPUs */
+ return false;
+
+ gobj = dma_buf->priv;
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+
+ if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
+ return true;
+
+ return false;
+}
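A hedged sketch of how an importer might consume this check: if the exporter sits in the same XGMI hive, the shared BO can stay in VRAM for peer-to-peer access instead of being bounced through GTT. The wrapper below is hypothetical; amdgpu_bo_pin() is the real pin helper:

	static int hypothetical_pin_imported(struct amdgpu_device *adev,
					     struct amdgpu_bo *bo)
	{
		u32 domain = AMDGPU_GEM_DOMAIN_GTT;	/* default: system domain */

		if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo))
			domain = AMDGPU_GEM_DOMAIN_VRAM; /* peer reaches VRAM over XGMI */

		return amdgpu_bo_pin(bo, domain);
	}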
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index ec447a7b6b28..2c5c84a06bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -29,6 +29,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
deleted file mode 100644
index 2082c0acd216..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ /dev/null
@@ -1,1218 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include "amdgpu.h"
-#include "amdgpu_atombios.h"
-#include "amdgpu_i2c.h"
-#include "amdgpu_dpm.h"
-#include "atom.h"
-#include "amd_pcie.h"
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
-{
- const char *s;
-
- switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- default:
- s = "none";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- s = "battery";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- s = "balanced";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- s = "performance";
- break;
- }
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
- if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
- (class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_cap_info(u32 caps)
-{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
-{
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
-
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vblank_in_pixels =
- amdgpu_crtc->hw_mode.crtc_htotal *
- (amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2));
-
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
-
- return vblank_time_us;
-}
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
- }
- }
- }
-
- return vrefresh;
-}
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
-{
- switch (sensor) {
- case THERMAL_TYPE_RV6XX:
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_SUMO:
- case THERMAL_TYPE_NI:
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_CI:
- case THERMAL_TYPE_KV:
- return true;
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- return false; /* need special handling */
- case THERMAL_TYPE_NONE:
- case THERMAL_TYPE_EXTERNAL:
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
- return false;
- }
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
-{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- int i;
- ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
-
- amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
- if (!amdgpu_table->entries)
- return -ENOMEM;
-
- entry = &atom_table->entries[0];
- for (i = 0; i < atom_table->ucNumEntries; i++) {
- amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
- (entry->ucClockHigh << 16);
- amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
- }
- amdgpu_table->count = atom_table->ucNumEntries;
-
- return 0;
-}
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
-
- return 0;
-}
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- union fan_info *fan_info;
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- int ret, i;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- /* fan table */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- if (power_info->pplib3.usFanTableOffset) {
- fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usFanTableOffset));
- adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
- adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
- adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
- adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
- adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
- adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
- adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
- if (fan_info->fan.ucFanTableFormat >= 2)
- adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
- else
- adev->pm.dpm.fan.t_max = 10900;
- adev->pm.dpm.fan.cycle_delay = 100000;
- if (fan_info->fan.ucFanTableFormat >= 3) {
- adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
- adev->pm.dpm.fan.default_max_fan_pwm =
- le16_to_cpu(fan_info->fan3.usFanPWMMax);
- adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
- adev->pm.dpm.fan.fan_output_sensitivity =
- le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
- }
- adev->pm.dpm.fan.ucode_fan_control = true;
- }
- }
-
- /* clock dependancy tables, shedding tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
- if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
- ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
- (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
- if (clk_v->ucNumEntries) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
- le16_to_cpu(clk_v->entries[0].usSclkLow) |
- (clk_v->entries[0].ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
- le16_to_cpu(clk_v->entries[0].usMclkLow) |
- (clk_v->entries[0].ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
- le16_to_cpu(clk_v->entries[0].usVddc);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
- le16_to_cpu(clk_v->entries[0].usVddci);
- }
- }
- if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
- ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
- (ATOM_PPLIB_PhaseSheddingLimits_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
- ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
-
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kcalloc(psl->ucNumEntries,
- sizeof(struct amdgpu_phase_shedding_limits_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
-
- entry = &psl->entries[0];
- for (i = 0; i < psl->ucNumEntries; i++) {
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
- }
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
- psl->ucNumEntries;
- }
- }
-
- /* cac data */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
- adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
- adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
- adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
- adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
- if (adev->pm.dpm.tdp_od_limit)
- adev->pm.dpm.power_control = true;
- else
- adev->pm.dpm.power_control = false;
- adev->pm.dpm.tdp_adjustment = 0;
- adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
- adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
- adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
- if (power_info->pplib5.usCACLeakageTableOffset) {
- ATOM_PPLIB_CAC_Leakage_Table *cac_table =
- (ATOM_PPLIB_CAC_Leakage_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
- ATOM_PPLIB_CAC_Leakage_Record *entry;
- u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- entry = &cac_table->entries[0];
- for (i = 0; i < cac_table->ucNumEntries; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
- le16_to_cpu(entry->usVddc1);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
- le16_to_cpu(entry->usVddc2);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
- le16_to_cpu(entry->usVddc3);
- } else {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(entry->usVddc);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(entry->ulLeakageValue);
- }
- entry = (ATOM_PPLIB_CAC_Leakage_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
- }
- adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
- }
- }
-
- /* ext tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
- ext_hdr->usVCETableOffset) {
- VCEClockInfoArray *array = (VCEClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + array->ucNumEntries * sizeof(VCEClockInfo));
- ATOM_PPLIB_VCE_State_Table *states =
- (ATOM_PPLIB_VCE_State_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
- 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
- ATOM_PPLIB_VCE_State_Record *state_entry;
- VCEClockInfo *vce_clk;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- state_entry = &states->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
- }
- adev->pm.dpm.num_of_vce_states =
- states->numEntries > AMD_MAX_VCE_LEVELS ?
- AMD_MAX_VCE_LEVELS : states->numEntries;
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.vce_states[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.vce_states[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.vce_states[i].clk_idx =
- state_entry->ucClockInfoIndex & 0x3f;
- adev->pm.dpm.vce_states[i].pstate =
- (state_entry->ucClockInfoIndex & 0xc0) >> 6;
- state_entry = (ATOM_PPLIB_VCE_State_Record *)
- ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
- ext_hdr->usUVDTableOffset) {
- UVDClockInfoArray *array = (UVDClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- UVDClockInfo *uvd_clk = (UVDClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
- le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
- le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
- ext_hdr->usSAMUTableOffset) {
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
- ext_hdr->usPPMTableOffset) {
- ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPPMTableOffset));
- adev->pm.dpm.dyn_state.ppm_table =
- kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
- adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
- le16_to_cpu(ppm->usCpuCoreNumber);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
- le32_to_cpu(ppm->ulPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
- le32_to_cpu(ppm->ulSmallACPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
- le32_to_cpu(ppm->ulPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
- le32_to_cpu(ppm->ulSmallACPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
- le32_to_cpu(ppm->ulApuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
- le32_to_cpu(ppm->ulDGpuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
- le32_to_cpu(ppm->ulDGpuUlvPower);
- adev->pm.dpm.dyn_state.ppm_table->tj_max =
- le32_to_cpu(ppm->ulTjmax);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
- ext_hdr->usACPTableOffset) {
- ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
- ext_hdr->usPowerTuneTableOffset) {
- u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- ATOM_PowerTune_Table *pt;
- adev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- if (rev > 0) {
- ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
- ppt->usMaximumPowerDeliveryLimit;
- pt = &ppt->power_tune_table;
- } else {
- ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
- pt = &ppt->power_tune_table;
- }
- adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
- le16_to_cpu(pt->usConfigurableTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
- adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
- le16_to_cpu(pt->usBatteryPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
- le16_to_cpu(pt->usSmallPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
- le16_to_cpu(pt->usLowCACLeakage);
- adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
- le16_to_cpu(pt->usHighCACLeakage);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
- ext_hdr->usSclkVddgfxTableOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(
- &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
- dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
-
- kfree(dyn_state->vddc_dependency_on_sclk.entries);
- kfree(dyn_state->vddci_dependency_on_mclk.entries);
- kfree(dyn_state->vddc_dependency_on_mclk.entries);
- kfree(dyn_state->mvdd_dependency_on_mclk.entries);
- kfree(dyn_state->cac_leakage_table.entries);
- kfree(dyn_state->phase_shedding_limits_table.entries);
- kfree(dyn_state->ppm_table);
- kfree(dyn_state->cac_tdp_table);
- kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
- kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
- kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
- kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
- kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
-}
-
-static const char *pp_lib_thermal_controller_names[] = {
- "NONE",
- "lm63",
- "adm1032",
- "adm1030",
- "max6649",
- "lm64",
- "f75375",
- "RV6xx",
- "RV770",
- "adt7473",
- "NONE",
- "External GPIO",
- "Evergreen",
- "emc2103",
- "Sumo",
- "Northern Islands",
- "Southern Islands",
- "lm96163",
- "Sea Islands",
- "Kaveri/Kabini",
-};
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- ATOM_PPLIB_POWERPLAYTABLE *power_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- ATOM_PPLIB_THERMALCONTROLLER *controller;
- struct amdgpu_i2c_bus_rec i2c_bus;
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return;
- power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
- (mode_info->atom_context->bios + data_offset);
- controller = &power_table->sThermalController;
-
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
- adev->pm.no_fan = true;
- adev->pm.fan_pulses_per_revolution =
- controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
- if (adev->pm.fan_pulses_per_revolution) {
- adev->pm.fan_min_rpm = controller->ucFanMinRPM;
- adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
- }
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_NI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_CI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
- } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
- i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
- adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
- if (adev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
- }
- } else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
- controller->ucType,
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- }
- }
-}
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
-{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
- (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
- (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
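
As a usage illustration only (a hypothetical call site, not part of this patch): an ASIC-specific DPM implementation would typically pass the platform link-speed mask cached in adev->pm.pcie_gen_mask plus a board default, and let the helper clamp the result to what both sides support.

	/* Hypothetical caller sketch: request the fastest mutually supported
	 * gen; AMDGPU_PCIE_GEN_INVALID routes to the default branch above.
	 */
	enum amdgpu_pcie_gen gen =
		amdgpu_get_pcie_gen_support(adev,
					    adev->pm.pcie_gen_mask,
					    AMDGPU_PCIE_GEN_INVALID,
					    AMDGPU_PCIE_GEN3);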
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (idx < adev->pm.dpm.num_of_vce_states)
- return &adev->pm.dpm.vce_states[idx];
-
- return NULL;
-}
-
-int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
-{
- uint32_t clk_freq;
- int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
- low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
- if (ret)
- return 0;
- return clk_freq * 100;
-
- } else {
- return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
- }
-}
-
-int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- uint32_t clk_freq;
- int ret = 0;
- if (is_support_sw_smu(adev)) {
- ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
- low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
- if (ret)
- return 0;
- return clk_freq * 100;
-
- } else {
- return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
- }
-}
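
For illustration (hypothetical snippet): assuming smu_get_dpm_freq_range() reports MHz, the "* 100" above yields 10 kHz units to match what the legacy powerplay get_sclk/get_mclk callbacks return, so a caller converting to MHz divides by 100.

	/* Hypothetical: print the peak engine clock in MHz. */
	uint32_t sclk_10khz = amdgpu_dpm_get_sclk(adev, false);

	DRM_INFO("max sclk: %u MHz\n", sclk_10khz / 100);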
-
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
-{
- int ret = 0;
- bool swsmu = is_support_sw_smu(adev);
-
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- case AMD_IP_BLOCK_TYPE_VCE:
- if (swsmu) {
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu) {
- /*
- * TODO: need a better lock mechanism
- *
- * Here adev->pm.mutex lock protection is enforced on
- * UVD and VCE cases only. Since for other cases, there
- * may be already lock protection in amdgpu_pm.c.
- * This is a quick fix for the deadlock issue below.
-			 * INFO: task ocltst:2028 blocked for more than 120 seconds.
-			 * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
-			 * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
-			 * ocltst D 0 2028 2026 0x00000000
-			 * Call Trace:
- * __schedule+0x2c0/0x870
- * schedule+0x2c/0x70
- * schedule_preempt_disabled+0xe/0x10
- * __mutex_lock.isra.9+0x26d/0x4e0
- * __mutex_lock_slowpath+0x13/0x20
- * ? __mutex_lock_slowpath+0x13/0x20
- * mutex_lock+0x2f/0x40
- * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
- * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
- * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
- * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
- * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
- * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
- */
- mutex_lock(&adev->pm.mutex);
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- mutex_unlock(&adev->pm.mutex);
- }
- break;
- case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
- case AMD_IP_BLOCK_TYPE_SDMA:
- if (swsmu)
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- break;
- case AMD_IP_BLOCK_TYPE_JPEG:
- if (swsmu)
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- break;
- case AMD_IP_BLOCK_TYPE_GMC:
- case AMD_IP_BLOCK_TYPE_ACP:
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- break;
- default:
- break;
- }
-
- return ret;
-}
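
A minimal caller sketch (hypothetical call site): gating and ungating go through the same entry point, and the per-IP-block switch above routes the request to either the SW SMU or the powerplay callback.

	/* Hypothetical: power-gate UVD while it is idle... */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, true);

	/* ...and ungate it again before the next submission. */
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, false);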
-
-int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_enter(smu);
- } else {
- if (!pp_funcs || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
- }
-
- return ret;
-}
-
-int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_exit(smu);
- } else {
- if (!pp_funcs || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* exit BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- }
-
- return ret;
-}
-
-int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
- enum pp_mp1_state mp1_state)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev)) {
- ret = smu_set_mp1_state(&adev->smu, mp1_state);
- } else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_mp1_state) {
- ret = adev->powerplay.pp_funcs->set_mp1_state(
- adev->powerplay.pp_handle,
- mp1_state);
- }
-
- return ret;
-}
-
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- bool baco_cap;
-
- if (is_support_sw_smu(adev)) {
- return smu_baco_is_support(smu);
- } else {
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
- return false;
-
- if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
- return false;
-
- return baco_cap ? true : false;
- }
-}
-
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev)) {
- return smu_mode2_reset(smu);
- } else {
- if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
- return -ENOENT;
-
- return pp_funcs->asic_reset_mode_2(pp_handle);
- }
-}
-
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
- int ret = 0;
-
- dev_info(adev->dev, "GPU BACO reset\n");
-
- if (is_support_sw_smu(adev)) {
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
-
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
- } else {
- if (!pp_funcs
- || !pp_funcs->set_asic_baco_state)
- return -ENOENT;
-
- /* enter BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
- if (ret)
- return ret;
-
- /* exit BACO state */
- ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_mode1_reset_is_support(smu);
-
- return false;
-}
-
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_mode1_reset(smu);
-
- return -EOPNOTSUPP;
-}
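
Taken together, the reset helpers admit a selection pattern along these lines (a sketch only; the real policy lives in the ASIC reset code, and the preference order here is an assumption):

	int r;

	if (amdgpu_dpm_is_baco_supported(adev))
		r = amdgpu_dpm_baco_reset(adev);
	else if (amdgpu_dpm_is_mode1_reset_supported(adev))
		r = amdgpu_dpm_mode1_reset(adev);
	else
		r = amdgpu_dpm_mode2_reset(adev);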
-
-int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
- enum PP_SMC_POWER_PROFILE type,
- bool en)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev))
- ret = smu_switch_power_profile(&adev->smu, type, en);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->switch_power_profile)
- ret = adev->powerplay.pp_funcs->switch_power_profile(
- adev->powerplay.pp_handle, type, en);
-
- return ret;
-}
-
-int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
- uint32_t pstate)
-{
- int ret = 0;
-
- if (is_support_sw_smu(adev))
- ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- else if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_xgmi_pstate)
- ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
- pstate);
-
- return ret;
-}
-
-int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
- uint32_t cstate)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- ret = smu_set_df_cstate(smu, cstate);
- else if (pp_funcs &&
- pp_funcs->set_df_cstate)
- ret = pp_funcs->set_df_cstate(pp_handle, cstate);
-
- return ret;
-}
-
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
-{
- struct smu_context *smu = &adev->smu;
-
- if (is_support_sw_smu(adev))
- return smu_allow_xgmi_power_down(smu, en);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
deleted file mode 100644
index aa27fe65cdfa..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __AMDGPU_DPM_H__
-#define __AMDGPU_DPM_H__
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amd_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* edge max emergency(shutdown) temp */
- int max_edge_emergency_temp;
- /* hotspot low temperature threshold */
- int min_hotspot_temp;
- /* hotspot high temperature critical threshold */
- int max_hotspot_crit_temp;
- /* hotspot max emergency(shutdown) temp */
- int max_hotspot_emergency_temp;
- /* memory low temperature threshold */
- int min_mem_temp;
- /* memory high temperature critical threshold */
- int max_mem_crit_temp;
- /* memory max emergency(shutdown) temp */
- int max_mem_emergency_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_post_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_display_configuration_changed(adev) \
- ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_print_power_state(adev, ps) \
- ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
-
-#define amdgpu_dpm_vblank_too_short(adev) \
- ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_enable_bapm(adev, e) \
- ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
- ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
-
-#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
- ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
- ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
- (adev)->powerplay.pp_handle, msg_id))
-
-#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
- (adev)->powerplay.pp_handle, buf))
-
-#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
- (adev)->powerplay.pp_handle, parameter, size))
-
-#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
- ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
- (adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
- (adev)->powerplay.pp_handle, (buf)))
-
-#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
- ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
- (adev)->powerplay.pp_handle, (ppfeatures)))
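
Note that these wrappers dereference pp_funcs unconditionally, so callers are expected to verify the callback first; a minimal sketch of the usual guard (hypothetical call site):

	/* Hypothetical guarded use of one of the wrappers above. */
	uint32_t value = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);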
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- u32 num_of_vce_states;
- struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
- enum amd_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- enum amd_pm_state_type last_state;
- enum amd_pm_state_type last_user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amd_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- bool bus_locked;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
- uint32_t smu_prv_buffer_size;
- struct amdgpu_bo *smu_prv_buffer;
- bool ac_power;
- /* powerplay feature */
- uint32_t pp_feature;
-
- /* Used for I2C access to various EEPROMs on relevant ASICs */
- struct i2c_adapter smu_i2c;
- struct list_head pm_attr_list;
-};
-
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum amdgpu_td {
- AMDGPU_TD_AUTO,
- AMDGPU_TD_UP,
- AMDGPU_TD_DOWN,
-};
-
-enum amdgpu_display_watermark {
- AMDGPU_DISPLAY_WATERMARK_LOW = 0,
- AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum amdgpu_display_gap
-{
- AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
- AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
- AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
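
For example (hypothetical snippet; AMDGPU_PP_SENSOR_GFX_SCLK is assumed from the shared powerplay sensor enum), a sensor read passes a caller-owned buffer and its size:

	uint32_t sclk, size = sizeof(sclk);

	/* Hypothetical: query the current graphics clock. */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				    (void *)&sclk, &size))
		DRM_INFO("current sclk: %u\n", sclk);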
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx);
-
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
- uint32_t block_type, bool gate);
-
-extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
-
-extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
-
-int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
- uint32_t pstate);
-
-int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
- enum PP_SMC_POWER_PROFILE type,
- bool en);
-
-int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
-
-int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
-
-bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
-int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
-
-int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
- enum pp_mp1_state mp1_state);
-
-int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
-
-int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
-
-int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
- uint32_t cstate);
-
-int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 26127c7d2f32..8e988f07f085 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -26,12 +26,12 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include "amdgpu_drv.h"
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
@@ -88,9 +88,10 @@
* - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
* - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
+ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -146,16 +147,18 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
-int amdgpu_noretry;
+int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = 0;
int amdgpu_reset_method = -1; /* auto */
+int amdgpu_num_kcq = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
+int amdgpu_bad_page_threshold = -1;
/**
* DOC: vramlimit (int)
@@ -393,12 +396,12 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
/**
- * DOC: ppfeaturemask (uint)
+ * DOC: ppfeaturemask (hexint)
* Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
* The default is the current set of stable power features.
*/
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
/**
* DOC: forcelongtraining (uint)
@@ -593,8 +596,13 @@ MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
+/**
+ * DOC: noretry (int)
+ * Disable retry faults in the GPU memory controller.
+ * (0 = retry enabled, 1 = retry disabled, -1 = auto (default))
+ */
MODULE_PARM_DESC(noretry,
- "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+ "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
@@ -676,11 +684,14 @@ MODULE_PARM_DESC(debug_largebar,
* Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
* table to get information about AMD APUs. This option can serve as a workaround on
* systems with a broken CRAT table.
+ *
+ * Default is auto (according to asic type, iommu_v2, and crat table, to decide
+ * whether to use CRAT)
*/
int ignore_crat;
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
- "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+ "Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
/**
* DOC: halt_if_hws_hang (int)
@@ -715,6 +726,15 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
+
+/**
+ * DOC: no_system_mem_limit(bool)
+ * Disable system memory limit, to support multiple process shared memory
+ */
+bool no_system_mem_limit;
+module_param(no_system_mem_limit, bool, 0644);
+MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+
#endif
/**
@@ -765,6 +785,19 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+/**
+ * DOC: bad_page_threshold (int)
+ * The bad page threshold specifies how many faulty pages detected by RAS ECC
+ * may accumulate before the GPU is considered to be in a bad state; once the
+ * total exceeds the threshold, the GPU is left in that state for the user's
+ * further inspection.
+ */
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default), 0 = disable bad page retirement)");
+module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
+
+MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (values greater than 8 or less than 0 fall back to 8; only affects gfx8 and newer)");
+module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1022,10 +1055,10 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
/* Arcturus */
- {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
/* Navi10 */
{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
@@ -1033,6 +1066,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
@@ -1044,8 +1078,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
- {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+
+ /* Sienna_Cichlid */
+ {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0, 0, 0}
};
@@ -1057,7 +1099,7 @@ static struct drm_driver kms_driver;
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct drm_device *dev;
+ struct drm_device *ddev;
struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
@@ -1073,6 +1115,16 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ /* Due to hardware bugs, S/G Display on Raven requires a 1:1 IOMMU mapping;
+ * however, SME requires an indirect IOMMU mapping because the encryption
+ * bit is beyond the DMA mask of the chip.
+ */
+ if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ dev_info(&pdev->dev,
+ "SME is not compatible with RAVEN\n");
+ return -ENOTSUPP;
+ }
+
#ifdef CONFIG_DRM_AMDGPU_SI
if (!amdgpu_si_support) {
switch (flags & AMD_ASIC_MASK) {
@@ -1113,36 +1165,39 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&kms_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ adev->dev = &pdev->dev;
+ adev->pdev = pdev;
+ ddev = adev_to_drm(adev);
if (!supports_atomic)
- dev->driver_features &= ~DRIVER_ATOMIC;
+ ddev->driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
-
- dev->pdev = pdev;
+ return ret;
- pci_set_drvdata(pdev, dev);
+ ddev->pdev = pdev;
+ pci_set_drvdata(pdev, ddev);
- ret = amdgpu_driver_load_kms(dev, ent->driver_data);
+ ret = amdgpu_driver_load_kms(adev, ent->driver_data);
if (ret)
goto err_pci;
retry_init:
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(ddev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
DRM_INFO("retry init %d\n", retry);
/* Don't request EX mode too frequently; rapid retries look like an attack */
msleep(5000);
goto retry_init;
- } else if (ret)
+ } else if (ret) {
goto err_pci;
+ }
- adev = dev->dev_private;
ret = amdgpu_debugfs_init(adev);
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
@@ -1151,8 +1206,6 @@ retry_init:
err_pci:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
return ret;
}
@@ -1169,14 +1222,13 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- drm_dev_put(dev);
}
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (amdgpu_ras_intr_triggered())
return;
@@ -1209,7 +1261,7 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
adev->in_hibernate = true;
@@ -1245,7 +1297,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int ret, i;
if (!adev->runpm) {
@@ -1279,7 +1331,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (amdgpu_is_atpx_hybrid()) {
pci_ignore_hotplug(pdev);
} else {
- pci_save_state(pdev);
+ amdgpu_device_cache_pci_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
@@ -1296,7 +1348,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int ret;
if (!adev->runpm)
@@ -1312,7 +1364,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else {
pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ amdgpu_device_load_pci_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
@@ -1332,7 +1384,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
int ret = 1;
@@ -1491,6 +1543,13 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
+static struct pci_error_handlers amdgpu_pci_err_handler = {
+ .error_detected = amdgpu_pci_error_detected,
+ .mmio_enabled = amdgpu_pci_mmio_enabled,
+ .slot_reset = amdgpu_pci_slot_reset,
+ .resume = amdgpu_pci_resume,
+};
+
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1498,10 +1557,9 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
.driver.pm = &amdgpu_pm_ops,
+ .err_handler = &amdgpu_pci_err_handler,
};
-
-
static int __init amdgpu_init(void)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 61fcf247a638..af4ef84e27a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -35,7 +35,7 @@
void
amdgpu_link_encoder_connector(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index db731f573f98..e2c2eb45a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -135,7 +135,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
- info = drm_get_format_info(adev->ddev, mode_cmd);
+ info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -231,7 +231,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+ ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
info->apertures->ranges[0].size = adev->gmc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -270,7 +270,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+ vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
return 0;
out:
@@ -318,7 +318,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
return 0;
/* don't init fbdev if there are no connectors */
- if (list_empty(&adev->ddev->mode_config.connector_list))
+ if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
return 0;
/* select 8 bpp console on low vram cards */
@@ -332,10 +332,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
rfbdev->adev = adev;
adev->mode_info.rfbdev = rfbdev;
- drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
- &amdgpu_fb_helper_funcs);
+ drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
+ &amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
+ ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
@@ -343,7 +343,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
- drm_helper_disable_unused_functions(adev->ddev);
+ drm_helper_disable_unused_functions(adev_to_drm(adev));
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
@@ -354,7 +354,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
if (!adev->mode_info.rfbdev)
return;
- amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+ amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
kfree(adev->mode_info.rfbdev);
adev->mode_info.rfbdev = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 58d4c219178a..fe2d495d08ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -155,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- pm_runtime_get_noresume(adev->ddev->dev);
+ pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -284,8 +284,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
return true;
@@ -700,7 +700,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -749,7 +749,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index e811fecc540f..8f4a8f8d8146 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -34,18 +34,31 @@
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
{
- /* TODO: Gaming SKUs don't have the FRU EEPROM.
- * Use this hack to address hangs on modprobe on gaming SKUs
- * until a proper solution can be implemented by only supporting
- * the explicit chip IDs for VG20 Server cards
- *
- * TODO: Add list of supported Arcturus DIDs once confirmed
+ /* Only server cards have the FRU EEPROM.
+ * TODO: See if we can figure this out dynamically instead of
+ * having to parse VBIOS versions.
*/
- if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
- (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
- (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
- return true;
- return false;
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+ * we can use just the "DXXX" portion. If there were more models, we
+ * could convert the 3 characters to a hex integer and use a switch
+ * for ease/speed/readability. For now, 2 string comparisons are
+ * reasonable and not too expensive
+ */
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ /* D161 and D163 are the VG20 server SKUs */
+ if (strnstr(atom_ctx->vbios_version, "D161",
+ sizeof(atom_ctx->vbios_version)) ||
+ strnstr(atom_ctx->vbios_version, "D163",
+ sizeof(atom_ctx->vbios_version)))
+ return true;
+ else
+ return false;
+ default:
+ return false;
+ }
}
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
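
The comment above suggests that, with more models, the three model digits could be converted to a hex integer and switched on. A hypothetical sketch of that alternative; the function name, the "-D" search heuristic, and the length handling are illustrative only, not part of the patch:

static bool is_vg20_server_sku(const char *vbios_version, size_t len)
{
	/* VBIOS version looks like ###-DXXXYY-##; pull out the three
	 * "XXX" model digits and compare them as a hex integer.
	 */
	const char *model = strnstr(vbios_version, "-D", len);
	char digits[4];
	u16 sku;

	if (!model || strlen(model) < 5)
		return false;

	memcpy(digits, model + 2, 3);
	digits[3] = '\0';
	if (kstrtou16(digits, 16, &sku))
		return false;

	switch (sku) {
	case 0x161:	/* D161, VG20 server SKU */
	case 0x163:	/* D163, VG20 server SKU */
		return true;
	default:
		return false;
	}
}
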
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index f29a8611d69b..1308d976d60e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -26,4 +26,4 @@
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
-#endif // __AMDGPU_PRODINFO_H__
+#endif // __AMDGPU_FRU_EEPROM_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7f9e50247413..7e8265da9f25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -93,7 +93,7 @@ retry:
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
struct drm_file *file;
mutex_lock(&ddev->filelist_mutex);
@@ -217,7 +217,7 @@ out_unlock:
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
@@ -298,7 +298,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct ttm_operation_ctx ctx = { true, false };
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
@@ -332,7 +332,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
bo = gem_to_amdgpu_bo(gobj);
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
- r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+ r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
if (r)
goto release_object;
@@ -587,7 +587,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_va *args = data;
struct drm_gem_object *gobj;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
@@ -596,6 +596,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
+ uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@@ -616,6 +617,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
+ return -EINVAL;
+ }
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
@@ -711,7 +721,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@@ -788,7 +798,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_gem_object *gobj;
uint32_t handle;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
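
To make the new upper-bound check in amdgpu_gem_va_ioctl() concrete, here is a standalone arithmetic sketch; the 48-bit VA space and the 1 MiB reserved-area size are assumed values for illustration:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE    4096ULL
#define VA_RESERVED_SIZE (1ULL << 20)	/* assumed 1 MiB reserved at the top */

int main(void)
{
	uint64_t max_pfn = 1ULL << 36;	/* 48-bit VA space, 4 KiB pages */
	uint64_t vm_size = max_pfn * GPU_PAGE_SIZE - VA_RESERVED_SIZE;
	uint64_t va = (1ULL << 48) - (1ULL << 19);	/* ends at the VA top */
	uint64_t map_size = 1ULL << 19;

	/* Mirrors the added check: reject mappings whose end falls
	 * inside the reserved area at the top of the VA space.
	 */
	if (va + map_size > vm_size)
		printf("rejected: end 0x%llx > limit 0x%llx\n",
		       (unsigned long long)(va + map_size),
		       (unsigned long long)vm_size);
	return 0;
}
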
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 78d37f92c7be..8c9bacfdbc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, mec;
+ int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-
- /* policy for amdgpu compute queue ownership */
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- queue = i % adev->gfx.mec.num_queue_per_pipe;
- pipe = (i / adev->gfx.mec.num_queue_per_pipe)
- % adev->gfx.mec.num_pipe_per_mec;
- mec = (i / adev->gfx.mec.num_queue_per_pipe)
- / adev->gfx.mec.num_pipe_per_mec;
-
- /* we've run out of HW */
- if (mec >= adev->gfx.mec.num_mec)
- break;
-
- if (multipipe_policy) {
- /* policy: amdgpu owns the first two queues of the first MEC */
- if (mec == 0 && queue < 2)
- set_bit(i, adev->gfx.mec.queue_bitmap);
- } else {
- /* policy: amdgpu owns all queues in the first pipe */
- if (mec == 0 && pipe == 0)
- set_bit(i, adev->gfx.mec.queue_bitmap);
+ int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe,
+ adev->gfx.num_compute_rings);
+
+ if (multipipe_policy) {
+ /* policy: make queues evenly cross all pipes on MEC1 only */
+ for (i = 0; i < max_queues_per_mec; i++) {
+ pipe = i % adev->gfx.mec.num_pipe_per_mec;
+ queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+ adev->gfx.mec.num_queue_per_pipe;
+
+ set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+ adev->gfx.mec.queue_bitmap);
}
+ } else {
+ /* policy: amdgpu owns all queues in the given pipe */
+ for (i = 0; i < max_queues_per_mec; ++i)
+ set_bit(i, adev->gfx.mec.queue_bitmap);
}
- /* update the number of active compute rings */
- adev->gfx.num_compute_rings =
- bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
- /* If you hit this case and edited the policy, you probably just
- * need to increase AMDGPU_MAX_COMPUTE_RINGS */
- if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
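
The rewrite above changes the ownership policy: instead of deriving mec/pipe/queue from a flat index and keeping only MEC0's first pipe (or its first two queues), the driver now takes at most num_compute_rings queues and, under the multipipe policy, round-robins them across MEC1's pipes. A standalone sketch of the resulting bit layout; the pipe/queue counts are typical values assumed for illustration:

#include <stdio.h>

int main(void)
{
	const int num_pipe_per_mec = 4;
	const int num_queue_per_pipe = 8;
	const int num_compute_rings = 8;	/* rings the driver wants */
	int max_queues = num_pipe_per_mec * num_queue_per_pipe;
	int i;

	if (num_compute_rings < max_queues)
		max_queues = num_compute_rings;

	/* Queue i lands on pipe (i % pipes): the 8 rings come out as
	 * 2 queues on each of MEC1's 4 pipes.
	 */
	for (i = 0; i < max_queues; i++) {
		int pipe = i % num_pipe_per_mec;
		int queue = (i / num_pipe_per_mec) % num_queue_per_pipe;

		printf("ring %d -> pipe %d queue %d (bit %d)\n",
		       i, pipe, queue, pipe * num_queue_per_pipe + queue);
	}
	return 0;
}
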
@@ -571,8 +560,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else if (!enable && adev->gfx.gfx_off_state) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;
+
+ if (adev->gfx.funcs->init_spm_golden) {
+ dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
+ amdgpu_gfx_init_spm_golden(adev);
+ }
+ }
}
mutex_unlock(&adev->gfx.gfx_off_mutex);
@@ -698,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
+ if (adev->in_pci_err_recovery)
+ return 0;
+
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -724,7 +722,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read;
might_sleep();
@@ -748,7 +746,7 @@ failed_unlock:
failed_kiq_read:
if (reg_val_offs)
amdgpu_device_wb_free(adev, reg_val_offs);
- pr_err("failed to read reg:%x\n", reg);
+ dev_err(adev->dev, "failed to read reg:%x\n", reg);
return ~0;
}
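
The amdgpu_in_reset() calls above replace direct reads of the old adev->in_gpu_reset flag. A minimal sketch of what such a wrapper presumably looks like; the atomic member name is an assumption:

static inline bool amdgpu_in_reset(struct amdgpu_device *adev)
{
	/* Atomic read, so callers need no lock around the check. */
	return atomic_read(&adev->in_gpu_reset) != 0;
}
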
@@ -762,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
BUG_ON(!ring->funcs->emit_wreg);
+ if (adev->in_pci_err_recovery)
+ return;
+
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
@@ -782,7 +783,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_write;
might_sleep();
@@ -801,5 +802,5 @@ failed_undo:
amdgpu_ring_undo(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
- pr_err("failed to write reg:%x\n", reg);
+ dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 1e7a2b0997c5..258498cbf1eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -216,6 +216,8 @@ struct amdgpu_gfx_funcs {
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
void (*reset_ras_error_count) (struct amdgpu_device *adev);
+ void (*init_spm_golden)(struct amdgpu_device *adev);
+ void (*query_ras_error_status) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -324,6 +326,7 @@ struct amdgpu_gfx {
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
+#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644
index 000000000000..66ebc2e3b2ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GFXHUB_H__
+#define __AMDGPU_GFXHUB_H__
+
+struct amdgpu_gfxhub_funcs {
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ int (*gart_enable)(struct amdgpu_device *adev);
+
+ void (*gart_disable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
+ void (*init)(struct amdgpu_device *adev);
+ int (*get_xgmi_info)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_gfxhub {
+ const struct amdgpu_gfxhub_funcs *funcs;
+};
+
+#endif
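
The header only declares the ops table; callers are expected to dispatch through adev->gfxhub.funcs, along these lines (a sketch, assuming the gfxhub member sits directly in struct amdgpu_device):

/* Sketch: IP-version-independent GART bring-up via the ops table. */
static int amdgpu_gfxhub_gart_enable(struct amdgpu_device *adev)
{
	const struct amdgpu_gfxhub_funcs *funcs = adev->gfxhub.funcs;

	/* Each ASIC generation registers its own implementation at init. */
	if (!funcs || !funcs->gart_enable)
		return -EINVAL;

	return funcs->gart_enable(adev);
}
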
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 34cbd6f6a56b..36604d751d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
@@ -411,3 +412,102 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break;
}
}
+
+/**
+ * amdgpu_gmc_noretry_set - set per asic noretry defaults
+ * @adev: amdgpu_device pointer
+ *
+ * Set a per asic default for the no-retry parameter.
+ *
+ */
+void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* Raven currently has issues with noretry;
+ * regardless of what we decide for other
+ * asics, we should leave raven with
+ * noretry = 0 until we root cause the
+ * issues.
+ */
+ if (amdgpu_noretry == -1)
+ gmc->noretry = 0;
+ else
+ gmc->noretry = amdgpu_noretry;
+ break;
+ default:
+ /* default this to 0 for now, but we may want
+ * to change this in the future for certain
+ * GPUs as it can increase performance in
+ * certain cases.
+ */
+ if (amdgpu_noretry == -1)
+ gmc->noretry = 0;
+ else
+ gmc->noretry = amdgpu_noretry;
+ break;
+ }
+}
+
+void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable)
+{
+ struct amdgpu_vmhub *hub;
+ u32 tmp, reg, i;
+
+ hub = &adev->vmhub[hub_type];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + hub->ctx_distance * i;
+
+ tmp = RREG32(reg);
+ if (enable)
+ tmp |= hub->vm_cntx_cntl_vm_fault;
+ else
+ tmp &= ~hub->vm_cntx_cntl_vm_fault;
+
+ WREG32(reg, tmp);
+ }
+}
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+{
+ unsigned size;
+
+ /*
+ * TODO:
+ * Currently there is a bug where some memory client outside
+ * of the driver writes to the first 8M of VRAM on S3 resume.
+ * This overrides GART, which by default gets placed in the first 8M,
+ * and causes VM_FAULTS once GTT is accessed.
+ * Keep the stolen memory reservation until this is solved.
+ */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+ }
+
+ if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
+ size = 0;
+ else
+ size = amdgpu_gmc_get_vbios_fb_size(adev);
+
+ /* set to 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ size = 0;
+
+ if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
+ adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
+ } else {
+ adev->mman.stolen_vga_size = size;
+ adev->mman.stolen_extended_size = 0;
+ }
+}
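
A worked example of the split at the end of amdgpu_gmc_get_vbios_allocations(); the 9 MiB cap stands in for AMDGPU_VBIOS_VGA_ALLOCATION, whose real value is not shown in this diff:

#include <stdio.h>

#define MiB(x)               ((unsigned long long)(x) << 20)
#define VBIOS_VGA_ALLOCATION MiB(9)	/* assumed cap, for illustration */

int main(void)
{
	unsigned long long size = MiB(16);	/* pre-OS framebuffer size */
	unsigned long long vga, extended;

	/* Same split as above: cap the VGA carve-out, spill the rest
	 * into the extended region.
	 */
	if (size > VBIOS_VGA_ALLOCATION) {
		vga = VBIOS_VGA_ALLOCATION;
		extended = size - vga;
	} else {
		vga = size;
		extended = 0;
	}
	printf("stolen_vga=%lluM stolen_extended=%lluM\n",
	       vga >> 20, extended >> 20);	/* prints 9M and 7M */
	return 0;
}
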
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index acdb61cfa24c..aa0c83776ce0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -74,6 +74,12 @@ struct amdgpu_gmc_fault {
/*
* VMHUB structures, functions & helpers
*/
+struct amdgpu_vmhub_funcs {
+ void (*print_l2_protection_fault_status)(struct amdgpu_device *adev,
+ uint32_t status);
+ uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type);
+};
+
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
@@ -92,6 +98,10 @@ struct amdgpu_vmhub {
uint32_t ctx_addr_distance; /* include LO32/HI32 */
uint32_t eng_distance;
uint32_t eng_addr_distance; /* include LO32/HI32 */
+
+ uint32_t vm_cntx_cntl_vm_fault;
+
+ const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
/*
@@ -121,6 +131,8 @@ struct amdgpu_gmc_funcs {
void (*get_vm_pte)(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags);
+ /* get the amount of memory used by the vbios for pre-OS console */
+ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
struct amdgpu_xgmi {
@@ -203,7 +215,6 @@ struct amdgpu_gmc {
uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
- uint64_t stolen_size;
uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
@@ -228,6 +239,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
+ int noretry;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -239,6 +251,7 @@ struct amdgpu_gmc {
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -288,5 +301,12 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
+
+extern void
+amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable);
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 77fae40197ab..731f3aa2e6ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -24,11 +24,10 @@
#include "amdgpu.h"
-struct amdgpu_gtt_mgr {
- struct drm_mm mm;
- spinlock_t lock;
- atomic64_t available;
-};
+static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_gtt_mgr, manager);
+}
struct amdgpu_gtt_node {
struct drm_mm_node node;
@@ -47,10 +46,11 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
+ man->size * PAGE_SIZE);
}
/**
@@ -65,10 +65,11 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
+ amdgpu_gtt_mgr_usage(man));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -76,32 +77,32 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
amdgpu_mem_info_gtt_used_show, NULL);
+static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func;
/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of GTT
+ * @adev: amdgpu_device pointer
+ * @gtt_size: maximum size of GTT
*
* Allocate and initialize the GTT manager.
*/
-static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr;
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
uint64_t start, size;
int ret;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
+ man->use_tt = true;
+ man->func = &amdgpu_gtt_mgr_func;
+
+ ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- atomic64_set(&mgr->available, p_size);
- man->priv = mgr;
+ atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
if (ret) {
@@ -114,31 +115,40 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
return ret;
}
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
/**
* amdgpu_gtt_mgr_fini - free and destroy GTT manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the GTT manager. Outstanding allocations are
* force-cleaned via ttm_resource_manager_force_list_clean() first.
*/
-static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
- return 0;
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL);
}
/**
@@ -148,7 +158,7 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
*
* Check if a mem object already has address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
{
return mem->mm_node != NULL;
}
@@ -163,12 +173,12 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
*
* Dummy, allocate the node but no space for it yet.
*/
-static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node;
int r;
@@ -226,10 +236,10 @@ err_out:
*
* Free the allocated GTT again.
*/
-static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node = mem->mm_node;
if (node) {
@@ -249,17 +259,17 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
*
* Return how many bytes are used in the GTT domain
*/
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
s64 result = man->size - atomic64_read(&mgr->available);
return (result > 0 ? result : 0) * PAGE_SIZE;
}
-int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
int r = 0;
@@ -284,10 +294,10 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
*
* Dump the table content using printk.
*/
-static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
@@ -298,10 +308,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
amdgpu_gtt_mgr_usage(man) >> 20);
}
-const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
- .init = amdgpu_gtt_mgr_init,
- .takedown = amdgpu_gtt_mgr_fini,
- .get_node = amdgpu_gtt_mgr_new,
- .put_node = amdgpu_gtt_mgr_del,
+static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
+ .alloc = amdgpu_gtt_mgr_new,
+ .free = amdgpu_gtt_mgr_del,
.debug = amdgpu_gtt_mgr_debug
};
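
The conversion above turns the GTT manager from a kzalloc'd blob hung off man->priv into a wrapper that embeds its ttm_resource_manager, which is what lets to_gtt_mgr() use container_of(). A sketch of the presumed new layout:

/* The TTM manager is embedded, so there is no separate allocation
 * and no man->priv indirection.
 */
struct amdgpu_gtt_mgr {
	struct ttm_resource_manager manager;	/* base, embedded first */
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};

static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
	/* Only valid for managers known to be embedded in an
	 * amdgpu_gtt_mgr, i.e. the TTM_PL_TT manager registered by
	 * amdgpu_gtt_mgr_init().
	 */
	return container_of(man, struct amdgpu_gtt_mgr, manager);
}
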
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 70dbe343f51d..47cad23a6b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -40,7 +40,7 @@
static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -82,7 +82,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -101,7 +101,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
static int amdgpu_i2c_get_clock(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -116,7 +116,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv)
static int amdgpu_i2c_get_data(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -130,7 +130,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv)
static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -143,7 +143,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
static void amdgpu_i2c_set_data(void *i2c_priv, int data)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index dcd492170598..2f53fa0ae9a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -445,7 +445,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
seq_printf(m, "--------------------- DELAYED --------------------- \n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 7521f4ab55de..6e9a9e5dbea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -43,7 +43,7 @@ static DEFINE_IDA(amdgpu_pasid_ida);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
struct dma_fence_cb cb;
- unsigned int pasid;
+ u32 pasid;
};
/**
@@ -79,7 +79,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
* amdgpu_pasid_free - Free a PASID
* @pasid: PASID to free
*/
-void amdgpu_pasid_free(unsigned int pasid)
+void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
ida_simple_remove(&amdgpu_pasid_ida, pasid);
@@ -105,7 +105,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
* Free the pasid only after all the fences in resv are signaled.
*/
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
- unsigned int pasid)
+ u32 pasid)
{
struct dma_fence *fence, **fences;
struct amdgpu_pasid_cb *cb;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 8e58325bbca2..0c3b4fa1f936 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -71,9 +71,9 @@ struct amdgpu_vmid_mgr {
};
int amdgpu_pasid_alloc(unsigned int bits);
-void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free(u32 pasid);
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
- unsigned int pasid);
+ u32 pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0cc4c67f95f7..300ac73b4738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -85,7 +85,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
hotplug_work);
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
@@ -151,7 +151,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
irqreturn_t ret;
ret = amdgpu_ih_process(adev, &adev->irq.ih);
@@ -268,9 +268,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
- adev->ddev->vblank_disable_immediate = true;
+ adev_to_drm(adev)->vblank_disable_immediate = true;
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
@@ -284,14 +284,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.installed = true;
/* Use vector 0 for MSI-X */
- r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
+ r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
return r;
}
- adev->ddev->max_vblank_count = 0x00ffffff;
+ adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
DRM_DEBUG("amdgpu: irq initialized.\n");
return 0;
@@ -311,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
unsigned i, j;
if (adev->irq.installed) {
- drm_irq_uninstall(adev->ddev);
+ drm_irq_uninstall(adev_to_drm(adev));
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_free_irq_vectors(adev->pdev);
@@ -522,7 +522,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return -ENOENT;
if (type >= src->num_types)
@@ -552,7 +552,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return -ENOENT;
if (type >= src->num_types)
@@ -583,7 +583,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return false;
if (type >= src->num_types)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 937029ad5271..dcfe8a3b03ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
if (!rq)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0047da06041f..efda38349a03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -78,7 +78,7 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
*/
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (adev == NULL)
return;
@@ -86,7 +86,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
amdgpu_unregister_gpu_instance(adev);
if (adev->rmmio == NULL)
- goto done_free;
+ return;
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
@@ -94,12 +94,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
}
amdgpu_acpi_fini(adev);
-
amdgpu_device_fini(adev);
-
-done_free:
- kfree(adev);
- dev->dev_private = NULL;
}
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
@@ -130,22 +125,18 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
- * @dev: drm dev pointer
+ * @adev: pointer to struct amdgpu_device
* @flags: device flags
*
* This is the main load function for KMS (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
- struct amdgpu_device *adev;
+ struct drm_device *dev;
int r, acpi_status;
- adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
- if (adev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)adev;
+ dev = adev_to_drm(adev);
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
@@ -160,7 +151,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+ r = amdgpu_device_init(adev, flags);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
goto out;
@@ -179,13 +170,14 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
/* enable runpm if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->runpm = true;
break;
case CHIP_VEGA10:
/* turn runpm on if noretry=0 */
- if (!amdgpu_noretry)
+ if (!adev->gmc.noretry)
adev->runpm = true;
break;
default:
@@ -290,14 +282,25 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_TA:
- if (query_fw->index > 1)
- return -EINVAL;
- if (query_fw->index == 0) {
+ switch (query_fw->index) {
+ case 0:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
- } else {
+ break;
+ case 1:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
+ break;
+ case 2:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+ break;
+ case 3:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_dtm_ucode_version;
+ break;
+ default:
+ return -EINVAL;
}
break;
case AMDGPU_INFO_FW_SDMA:
@@ -479,7 +482,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
*/
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -594,13 +597,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -623,7 +626,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min(adev->gmc.visible_vram_size -
atomic64_read(&adev->visible_pin_size),
vram_gtt.vram_size);
- vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
@@ -631,14 +634,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
-
+ struct ttm_resource_manager *vram_man =
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ struct ttm_resource_manager *gtt_man =
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
- amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_usage(vram_man);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -648,16 +654,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
atomic64_read(&adev->visible_pin_size),
mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_vis_usage(vram_man);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
- mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size = gtt_man->size;
mem.gtt.total_heap_size *= PAGE_SIZE;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
- amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ amdgpu_gtt_mgr_usage(gtt_man);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -678,8 +684,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
* in the bitfields */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
+ else if (se_num >= AMDGPU_GFX_MAX_SE)
+ return -EINVAL;
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
+ return -EINVAL;
if (info->read_mmr_reg.count > 128)
return -EINVAL;
@@ -737,6 +747,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+ if (amdgpu_is_tmz(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -990,7 +1002,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
*/
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv;
int r, pasid;
@@ -1075,11 +1087,11 @@ pm_put:
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
- unsigned int pasid;
+ u32 pasid;
int handle;
if (!fpriv)
@@ -1140,7 +1152,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int vpos, hpos, stat;
u32 count;
@@ -1208,7 +1220,7 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
@@ -1225,7 +1237,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
@@ -1261,7 +1273,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
@@ -1384,13 +1396,31 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
query_fw.fw_type = AMDGPU_INFO_FW_TA;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 4; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
continue;
- seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
- i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+ switch (query_fw.index) {
+ case 0:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "RAS", fw_info.feature, fw_info.ver);
+ break;
+ case 1:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "XGMI", fw_info.feature, fw_info.ver);
+ break;
+ case 2:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "HDCP", fw_info.feature, fw_info.ver);
+ break;
+ case 3:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "DTM", fw_info.feature, fw_info.ver);
+ break;
+ default:
+ return -EINVAL;
+ }
}
/* SMC */
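
With four TA sub-firmwares the switch above grows one near-identical case per index. A table-driven variant is a common alternative; this sketch (the helper name is hypothetical) keeps the loop body to a single seq_printf() and uses the index-to-label order from amdgpu_firmware_info():

static void amdgpu_debugfs_print_ta_fw(struct seq_file *m,
				       struct amdgpu_device *adev)
{
	/* Index order matches amdgpu_firmware_info(): 0=XGMI, 1=RAS,
	 * 2=HDCP, 3=DTM.
	 */
	static const char *const ta_fw_name[] = {
		[0] = "XGMI", [1] = "RAS", [2] = "HDCP", [3] = "DTM",
	};
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw = { .fw_type = AMDGPU_INFO_FW_TA };
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(ta_fw_name); i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
			   ta_fw_name[i], fw_info.feature, fw_info.ver);
	}
}
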
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index e89fb35fec71..1ae9bdae7311 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -27,6 +27,20 @@ struct amdgpu_mmhub_funcs {
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ void (*init)(struct amdgpu_device *adev);
+ int (*gart_enable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev,
+ bool value);
+ void (*gart_disable)(struct amdgpu_device *adev);
+ int (*set_clockgating)(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+ void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ void (*update_power_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 37ba07e2feb5..a04decb934b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -46,6 +46,7 @@
#include <drm/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -404,7 +405,8 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
- u32 last_flip_vblank;
+ /* parameters accessed from the DM IRQ handler */
+ struct dm_irq_params dm_irq_params;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
@@ -469,6 +471,7 @@ struct amdgpu_encoder {
struct amdgpu_connector_atom_dig {
/* displayport */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5ac7b5561475..ac043baac05d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ places[c].mem_type = TTM_PL_VRAM;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
@@ -152,7 +152,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_TT;
+ places[c].mem_type = TTM_PL_TT;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
@@ -164,7 +165,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
@@ -176,28 +178,32 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ places[c].mem_type = AMDGPU_PL_GDS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ places[c].mem_type = AMDGPU_PL_GWS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_OA) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ places[c].mem_type = AMDGPU_PL_OA;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (!c) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = TTM_PL_MASK_CACHING;
c++;
}
@@ -374,6 +380,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (r)
return r;
+ if ((*bo_ptr) == NULL)
+ return 0;
+
/*
* Remove the original mem node and create a new one at the request
* position.
@@ -381,7 +390,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
@@ -442,14 +451,14 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
unsigned long size, u32 domain)
{
- struct ttm_mem_type_manager *man = NULL;
+ struct ttm_resource_manager *man = NULL;
/*
* If GTT is part of requested domains the check must succeed to
* allow fall back to GTT
*/
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- man = &adev->mman.bdev.man[TTM_PL_TT];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
if (size < (man->size << PAGE_SHIFT))
return true;
@@ -458,7 +467,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
}
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
if (size < (man->size << PAGE_SHIFT))
return true;
@@ -552,7 +561,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
+ drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -591,7 +600,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ bo->tbo.mem.mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -1268,11 +1277,11 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
*/
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1299,7 +1308,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
*
* Wipes VRAM buffers whose contents should not be leaked before the
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index afa5189dba7d..5ddb6cf96030 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -160,7 +160,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(adev->dev, "%p reserve failed\n", bo);
@@ -283,7 +283,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem);
+ struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
deleted file mode 100644
index e4dbf14320b6..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ /dev/null
@@ -1,3928 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rafał Miłecki <zajec5@gmail.com>
- * Alex Deucher <alexdeucher@gmail.com>
- */
-
-#include <drm/drm_debugfs.h>
-
-#include "amdgpu.h"
-#include "amdgpu_drv.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_dpm.h"
-#include "amdgpu_display.h"
-#include "amdgpu_smu.h"
-#include "atom.h"
-#include <linux/power_supply.h>
-#include <linux/pci.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/nospec.h>
-#include <linux/pm_runtime.h>
-#include "hwmgr.h"
-#define WIDTH_4K 3840
-
-static const struct cg_flag_name clocks[] = {
- {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
- {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
- {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
- {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
- {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
- {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
- {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
- {AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
-
- {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
- {0, NULL},
-};
-
-static const struct hwmon_temp_label {
- enum PP_HWMON_TEMP channel;
- const char *label;
-} temp_label[] = {
- {PP_TEMP_EDGE, "edge"},
- {PP_TEMP_JUNCTION, "junction"},
- {PP_TEMP_MEM, "mem"},
-};
-
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
- mutex_unlock(&adev->pm.mutex);
-
- if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
- }
-}
-
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size)
-{
- int ret = 0;
-
- if (!data || !size)
- return -EINVAL;
-
- if (is_support_sw_smu(adev))
- ret = smu_read_sensor(&adev->smu, sensor, data, size);
- else {
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
- sensor, data, size);
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/**
- * DOC: power_dpm_state
- *
- * The power_dpm_state file is a legacy interface and is only provided for
- * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
- * certain power related parameters. The file power_dpm_state is used for this.
- * It accepts the following arguments:
- *
- * - battery
- *
- * - balanced
- *
- * - performance
- *
- * battery
- *
- * On older GPUs, the vbios provided a special power state for battery
- * operation. Selecting battery switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
- *
- * balanced
- *
- * On older GPUs, the vbios provided a special power state for balanced
- * operation. Selecting balanced switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
- *
- * performance
- *
- * On older GPUs, the vbios provided a special power state for performance
- * operation. Selecting performance switched to this state. This is no
- * longer provided on newer GPUs so the option does nothing in that case.
- *
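- * As an illustrative sketch (assuming the shell's working directory is
- * the GPU's sysfs device directory, e.g. /sys/class/drm/card0/device;
- * the card index varies by system):
- *
- * .. code-block:: bash
- *
- *      cat power_dpm_state                # read the current state
- *      echo battery > power_dpm_state     # a no-op on newer GPUs
- *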
- */
-
-static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type pm;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- if (adev->smu.ppt_funcs->get_current_power_state)
- pm = smu_get_current_power_state(&adev->smu);
- else
- pm = adev->pm.dpm.user_state;
- } else if (adev->powerplay.pp_funcs->get_current_power_state) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- } else {
- pm = adev->pm.dpm.user_state;
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
-}
-
-static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strncmp("battery", buf, strlen("battery")) == 0)
- state = POWER_STATE_TYPE_BATTERY;
- else if (strncmp("balanced", buf, strlen("balanced")) == 0)
- state = POWER_STATE_TYPE_BALANCED;
- else if (strncmp("performance", buf, strlen("performance")) == 0)
- state = POWER_STATE_TYPE_PERFORMANCE;
- else
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
- } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-
-/**
- * DOC: power_dpm_force_performance_level
- *
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters. The file power_dpm_force_performance_level is
- * used for this. It accepts the following arguments:
- *
- * - auto
- *
- * - low
- *
- * - high
- *
- * - manual
- *
- * - profile_standard
- *
- * - profile_min_sclk
- *
- * - profile_min_mclk
- *
- * - profile_peak
- *
- * auto
- *
- * When auto is selected, the driver will attempt to dynamically select
- * the optimal power profile for current conditions in the driver.
- *
- * low
- *
- * When low is selected, the clocks are forced to the lowest power state.
- *
- * high
- *
- * When high is selected, the clocks are forced to the highest power state.
- *
- * manual
- *
- * When manual is selected, the user can manually adjust which power states
- * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
- * and pp_dpm_pcie files and adjust the power state transition heuristics
- * via the pp_power_profile_mode sysfs file.
- *
- * profile_standard
- * profile_min_sclk
- * profile_min_mclk
- * profile_peak
- *
- * When the profiling modes are selected, clock and power gating are
- * disabled and the clocks are set for different profiling cases. This
- * mode is recommended for profiling specific workloads where you do
- * not want clock or power gating, or clock fluctuation, to interfere
- * with your results. profile_standard sets the clocks to a fixed clock
- * level which varies from asic to asic. profile_min_sclk forces the sclk
- * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
- * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
- *
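- * As a usage sketch (run from the GPU's sysfs device directory; the
- * sequence below is illustrative):
- *
- * .. code-block:: bash
- *
- *      # pin clocks for a profiling run, then hand control back
- *      echo profile_peak > power_dpm_force_performance_level
- *      cat power_dpm_force_performance_level
- *      echo auto > power_dpm_force_performance_level
- *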
- */
-
-static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level = 0xff;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
- level = amdgpu_dpm_get_performance_level(adev);
- else
- level = adev->pm.dpm.forced_level;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
- (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
- (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
- "unknown");
-}
-
-static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level = 0xff;
- int ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strncmp("low", buf, strlen("low")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_LOW;
- } else if (strncmp("high", buf, strlen("high")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_HIGH;
- } else if (strncmp("auto", buf, strlen("auto")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_AUTO;
- } else if (strncmp("manual", buf, strlen("manual")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_MANUAL;
- } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
- } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
- } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
- } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
- } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
- } else {
- return -EINVAL;
- }
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- current_level = smu_get_performance_level(&adev->smu);
- else if (adev->powerplay.pp_funcs->get_performance_level)
- current_level = amdgpu_dpm_get_performance_level(adev);
-
- if (current_level == level) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- }
-
- if (adev->asic_type == CHIP_RAVEN) {
- if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
- if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, false);
- else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, true);
- }
- }
-
- /* profile_exit setting is valid only when current mode is in profile mode */
- if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
- pr_err("Currently not in any profile mode!\n");
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_force_performance_level(&adev->smu, level);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->force_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.dpm.thermal_active) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- } else {
- adev->pm.dpm.forced_level = level;
- }
- mutex_unlock(&adev->pm.mutex);
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_num_states(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- int i, buf_len, ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_get_power_num_states(&adev->smu, &data);
- if (ret) {
- /* drop the runtime PM reference taken above before bailing out */
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
- } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
- amdgpu_dpm_get_pp_num_states(adev, &data);
- } else {
- memset(&data, 0, sizeof(data));
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
- for (i = 0; i < data.nums; i++)
- buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
- (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
- (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
- (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
-
- return buf_len;
-}
-
-static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- struct smu_context *smu = &adev->smu;
- enum amd_pm_state_type pm = 0;
- int i = 0, ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pm = smu_get_current_power_state(smu);
- ret = smu_get_power_num_states(smu, &data);
- if (ret) {
- /* drop the runtime PM reference taken above before bailing out */
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
- } else if (adev->powerplay.pp_funcs->get_current_power_state
- && adev->powerplay.pp_funcs->get_pp_num_states) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-}
-
-static ssize_t amdgpu_get_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->pp_force_state_enabled)
- return amdgpu_get_pp_cur_state(dev, attr, buf);
- else
- return snprintf(buf, PAGE_SIZE, "\n");
-}
-
-static ssize_t amdgpu_set_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state = 0;
- unsigned long idx;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (strlen(buf) == 1)
- adev->pp_force_state_enabled = false;
- else if (is_support_sw_smu(adev))
- adev->pp_force_state_enabled = false;
- else if (adev->powerplay.pp_funcs->dispatch_tasks &&
- adev->powerplay.pp_funcs->get_pp_num_states) {
- struct pp_states_info data;
-
- ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states))
- return -EINVAL;
-
- idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
-
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_ENABLE_USER_STATE, &state);
- adev->pp_force_state_enabled = true;
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- }
-
- return count;
-}
-
-/**
- * DOC: pp_table
- *
- * The amdgpu driver provides a sysfs API for uploading new powerplay
- * tables. The file pp_table is used for this. Reading the file
- * will dump the current powerplay table. Writing to the file
- * will attempt to upload a new powerplay table and re-initialize
- * powerplay using that new table.
- *
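- * A minimal round-trip sketch (the table is binary, so go through a
- * file; the /tmp path is illustrative):
- *
- * .. code-block:: bash
- *
- *      cat pp_table > /tmp/pp_table.bin    # dump the current table
- *      cat /tmp/pp_table.bin > pp_table    # upload a (modified) copy
- *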
- */
-
-static ssize_t amdgpu_get_pp_table(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- char *table = NULL;
- int size, ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else if (adev->powerplay.pp_funcs->get_pp_table) {
- size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return 0;
- }
-
- if (size >= PAGE_SIZE)
- size = PAGE_SIZE - 1;
-
- memcpy(buf, table, size);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_table(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
- } else if (adev->powerplay.pp_funcs->set_pp_table)
- amdgpu_dpm_set_pp_table(adev, buf, count);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-/**
- * DOC: pp_od_clk_voltage
- *
- * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
- * in each power level within a power state. The pp_od_clk_voltage is used for
- * this.
- *
- * < For Vega10 and previous ASICs >
- *
- * Reading the file will display:
- *
- * - a list of engine clock levels and voltages labeled OD_SCLK
- *
- * - a list of memory clock levels and voltages labeled OD_MCLK
- *
- * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
- *
- * To manually adjust these settings, first select manual using
- * power_dpm_force_performance_level. Enter a new value for each
- * level by writing a string that contains "s/m level clock voltage" to
- * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
- * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
- * 810 mV. When you have edited all of the states as needed, write
- * "c" (commit) to the file to commit your changes. If you want to reset to the
- * default power levels, write "r" (reset) to the file to reset them.
- *
- *
- * < For Vega20 and newer ASICs >
- *
- * Reading the file will display:
- *
- * - minimum and maximum engine clock labeled OD_SCLK
- *
- * - maximum memory clock labeled OD_MCLK
- *
- * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
- * They can be used to calibrate the sclk voltage curve.
- *
- * - a list of valid ranges for sclk, mclk, and voltage curve points
- * labeled OD_RANGE
- *
- * To manually adjust these settings:
- *
- * - First select manual using power_dpm_force_performance_level
- *
- * - For clock frequency setting, enter a new value by writing a
- * string that contains "s/m index clock" to the file. The index
- * should be 0 to set the minimum clock and 1 to set the maximum
- * clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
- * "m 1 800" will update the maximum mclk to 800 MHz.
- *
- * - For the sclk voltage curve, enter the new values by writing a
- * string that contains "vc point clock voltage" to the file. The
- * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
- * update the first point with the clock set to 300 MHz and the
- * voltage to 600 mV. "vc 2 1000 1000" will update the third point
- * with the clock set to 1000 MHz and the voltage to 1000 mV.
- *
- * - When you have edited all of the states as needed, write "c" (commit)
- * to the file to commit your changes
- *
- * - If you want to reset to the default power levels, write "r" (reset)
- * to the file to reset them
- *
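- * Putting the above together, a pre-Vega20-style session might look
- * like this (clock and voltage values are illustrative only):
- *
- * .. code-block:: bash
- *
- *      echo manual > power_dpm_force_performance_level
- *      echo "s 1 500 820" > pp_od_clk_voltage   # sclk level 1: 500 MHz, 820 mV
- *      echo "m 0 350 810" > pp_od_clk_voltage   # mclk level 0: 350 MHz, 810 mV
- *      echo "c" > pp_od_clk_voltage             # commit the edits
- *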
- */
-
-static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t parameter_size = 0;
- long parameter[64];
- char buf_cpy[128];
- char *tmp_str;
- char *sub_str;
- const char delimiter[3] = {' ', '\n', '\0'};
- uint32_t type;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (count > 127)
- return -EINVAL;
-
- if (*buf == 's')
- type = PP_OD_EDIT_SCLK_VDDC_TABLE;
- else if (*buf == 'm')
- type = PP_OD_EDIT_MCLK_VDDC_TABLE;
- else if (*buf == 'r')
- type = PP_OD_RESTORE_DEFAULT_TABLE;
- else if (*buf == 'c')
- type = PP_OD_COMMIT_DPM_TABLE;
- else if (!strncmp(buf, "vc", 2))
- type = PP_OD_EDIT_VDDC_CURVE;
- else
- return -EINVAL;
-
- memcpy(buf_cpy, buf, count+1);
-
- tmp_str = buf_cpy;
-
- if (type == PP_OD_EDIT_VDDC_CURVE)
- tmp_str++;
- while (isspace(*++tmp_str));
-
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
- ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret)
- return -EINVAL;
- parameter_size++;
-
- while (isspace(*tmp_str))
- tmp_str++;
- }
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_od_edit_dpm_table(&adev->smu, type,
- parameter, parameter_size);
-
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else {
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
-
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_READJUST_POWER_STATE,
- NULL);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
- size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
- } else if (adev->powerplay.pp_funcs->print_clock_levels) {
- size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/**
- * DOC: pp_features
- *
- * The amdgpu driver provides a sysfs API for adjusting which powerplay
- * features are enabled. The file pp_features is used for this. It is
- * only available for Vega10 and later dGPUs.
- *
- * Reading back the file will show you the following:
- * - Current ppfeature masks
- * - A list of all supported powerplay features with their names,
- * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
- *
- * To manually enable or disable a specific feature, just set or clear
- * the corresponding bit in the original ppfeature mask and write the
- * new mask back to the file, as in the example below.
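- *
- * For instance (the mask value below is purely illustrative; derive the
- * real one from the bits reported when reading the file):
- *
- * .. code-block:: bash
- *
- *      cat pp_features                          # current mask and bit names
- *      echo 0x0000000000003fff > pp_features    # write the adjusted mask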
- */
-static ssize_t amdgpu_set_pp_features(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint64_t featuremask;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtou64(buf, 0, &featuremask);
- if (ret)
- return -EINVAL;
-
- pr_debug("featuremask = 0x%llx\n", featuremask);
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
- ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_features(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_ppfeature_status)
- size = amdgpu_dpm_get_ppfeature_status(adev, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
- *
- * The amdgpu driver provides a sysfs API for adjusting what power levels
- * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
- * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
- * this.
- *
- * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
- * Vega10 and later ASICs.
- * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
- *
- * Reading back the files will show you the available power levels within
- * the power state and the clock information for those levels.
- *
- * To manually adjust these states, first select manual using
- * power_dpm_force_performance_level.
- * Secondly, enter a new value for each level by writing a string that
- * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie".
- * E.g.,
- *
- * .. code-block:: bash
- *
- * echo "4 5 6" > pp_dpm_sclk
- *
- * will enable sclk levels 4, 5, and 6.
- *
- * NOTE: changing the dcefclk max dpm level is not currently supported
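- *
- * When reading the files back, the currently active levels are marked
- * with a trailing "*" (the output below is a sketch, not captured from
- * real hardware):
- *
- * .. code-block:: bash
- *
- *      cat pp_dpm_sclk
- *      # 0: 300Mhz
- *      # 1: 600Mhz *
- *      # 2: 1200Mhz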
- */
-
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-/*
- * Worst case: 32 bits individually specified, in octal at 12 characters
- * per line (+1 for \n).
- */
-#define AMDGPU_MASK_BUF_MAX (32 * 13)
-
-static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
-{
- int ret;
- long level;
- char *sub_str = NULL;
- char *tmp;
- char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
- const char delimiter[3] = {' ', '\n', '\0'};
- size_t bytes;
-
- *mask = 0;
-
- bytes = min(count, sizeof(buf_cpy) - 1);
- memcpy(buf_cpy, buf, bytes);
- buf_cpy[bytes] = '\0';
- tmp = buf_cpy;
- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
- if (strlen(sub_str)) {
- ret = kstrtol(sub_str, 0, &level);
- if (ret)
- return -EINVAL;
- *mask |= 1 << level;
- } else
- break;
- }
-
- return 0;
-}
-
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t mask = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
- else if (adev->powerplay.pp_funcs->print_clock_levels)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- uint32_t mask = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = amdgpu_read_mask(buf, count, &mask);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
- else if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else
- ret = 0;
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (ret)
- return -EINVAL;
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
- else if (adev->powerplay.pp_funcs->get_sclk_od)
- value = amdgpu_dpm_get_sclk_od(adev);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
- } else {
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
- else if (adev->powerplay.pp_funcs->get_mclk_od)
- value = amdgpu_dpm_get_mclk_od(adev);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret)
- return -EINVAL;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
- } else {
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return count;
-}
-
-/**
- * DOC: pp_power_profile_mode
- *
- * The amdgpu driver provides a sysfs API for adjusting the heuristics
- * related to switching between power levels in a power state. The file
- * pp_power_profile_mode is used for this.
- *
- * Reading this file outputs a list of all of the predefined power profiles
- * and the relevant heuristics settings for that profile.
- *
- * To select a profile or create a custom profile, first select manual using
- * power_dpm_force_performance_level. Writing the number of a predefined
- * profile to pp_power_profile_mode will enable those heuristics. To
- * create a custom set of heuristics, write a string of numbers to the file
- * starting with the number of the custom profile along with a setting
- * for each heuristic parameter. Due to differences across ASIC families,
- * the heuristic parameters vary from family to family.
- *
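- * For example (profile numbers differ per ASIC; the "1" below is
- * illustrative):
- *
- * .. code-block:: bash
- *
- *      echo manual > power_dpm_force_performance_level
- *      cat pp_power_profile_mode       # list profiles and their numbers
- *      echo 1 > pp_power_profile_mode  # select predefined profile 1
- *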
- */
-
-static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- size = smu_get_power_profile_mode(&adev->smu, buf);
- else if (adev->powerplay.pp_funcs->get_power_profile_mode)
- size = amdgpu_dpm_get_power_profile_mode(adev, buf);
- else
- size = snprintf(buf, PAGE_SIZE, "\n");
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return size;
-}
-
-
-static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- int ret;
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t parameter_size = 0;
- long parameter[64];
- char *sub_str, buf_cpy[128];
- char *tmp_str;
- uint32_t i = 0;
- char tmp[2];
- long int profile_mode = 0;
- const char delimiter[3] = {' ', '\n', '\0'};
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- tmp[0] = *(buf);
- tmp[1] = '\0';
- ret = kstrtol(tmp, 0, &profile_mode);
- if (ret)
- return -EINVAL;
-
- if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (count < 2 || count > 127)
- return -EINVAL;
- while (isspace(*++buf))
- i++;
- memcpy(buf_cpy, buf, count-i);
- tmp_str = buf_cpy;
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
- ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
- if (ret)
- return -EINVAL;
- parameter_size++;
- while (isspace(*tmp_str))
- tmp_str++;
- }
- }
- parameter[parameter_size] = profile_mode;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
- else if (adev->powerplay.pp_funcs->set_power_profile_mode)
- ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (!ret)
- return count;
-
- return -EINVAL;
-}
-
-/**
- * DOC: gpu_busy_percent
- *
- * The amdgpu driver provides a sysfs API for reading how busy the GPU
- * is as a percentage. The file gpu_busy_percent is used for this.
- * The SMU firmware computes a percentage of load based on the
- * aggregate activity level in the IP cores.
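- *
- * E.g. a simple one-second poll (mem_busy_percent below is read the
- * same way):
- *
- * .. code-block:: bash
- *
- *      watch -n1 cat gpu_busy_percent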
- */
-static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int r, value, size = sizeof(value);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- /* read the IP busy sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-/**
- * DOC: mem_busy_percent
- *
- * The amdgpu driver provides a sysfs API for reading how busy the VRAM
- * is as a percentage. The file mem_busy_percent is used for this.
- * The SMU firmware computes a percentage of load based on the
- * aggregate activity level in the IP cores.
- */
-static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int r, value, size = sizeof(value);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- /* read the IP busy sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-/**
- * DOC: pcie_bw
- *
- * The amdgpu driver provides a sysfs API for estimating how much data
- * has been received and sent by the GPU in the last second through PCIe.
- * The file pcie_bw is used for this.
- * The Perf counters count the number of received and sent messages and return
- * those values, as well as the maximum payload size of a PCIe packet (mps).
- * Note that it is not possible to easily and quickly obtain the size of each
- * packet transmitted, so we output the max payload size (mps) to allow for
- * quick estimation of the PCIe bandwidth usage.
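- *
- * A rough estimate can be derived from the three values (a sketch; it
- * assumes the documented "received sent mps" output order):
- *
- * .. code-block:: bash
- *
- *      read rx tx mps < pcie_bw
- *      echo "~$(( (rx + tx) * mps )) bytes moved in the last second"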
- */
-static ssize_t amdgpu_get_pcie_bw(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint64_t count0 = 0, count1 = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->flags & AMD_IS_APU)
- return -ENODATA;
-
- if (!adev->asic_funcs->get_pcie_usage)
- return -ENODATA;
-
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
-
- amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
- return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
- count0, count1, pcie_get_mps(adev->pdev));
-}
-
-/**
- * DOC: unique_id
- *
- * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
- * The file unique_id is used for this.
- * This will provide a unique ID that will persist from machine to machine.
- *
- * NOTE: This will only work for GFX9 and newer. This file will be absent
- * on unsupported ASICs (GFX8 and older).
- */
-static ssize_t amdgpu_get_unique_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (adev->unique_id)
- return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
-
- return 0;
-}
-
-/**
- * DOC: thermal_throttling_logging
- *
- * Thermal throttling pulls down the clock frequency and thus the performance.
- * It's a useful mechanism to protect the chip from overheating. Since it
- * impacts performance, the user controls whether it is enabled and, if so,
- * the log frequency.
- *
- * Reading back the file shows you the status (enabled or disabled) and
- * the interval (in seconds) between each thermal logging.
- *
- * Writing an integer to the file sets a new logging interval, in seconds.
- * The value should be between 1 and 3600. If the value is less than 1,
- * thermal logging is disabled. Values greater than 3600 are rejected.
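- *
- * For instance (interval values are illustrative):
- *
- * .. code-block:: bash
- *
- *      echo 60 > thermal_throttling_logging   # log at most every 60 s
- *      echo 0 > thermal_throttling_logging    # disable the logging
- *      cat thermal_throttling_logging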
- */
-static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
- adev->ddev->unique,
- atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
- adev->throttling_logging_rs.interval / HZ + 1);
-}
-
-static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- long throttling_logging_interval;
- unsigned long flags;
- int ret = 0;
-
- ret = kstrtol(buf, 0, &throttling_logging_interval);
- if (ret)
- return ret;
-
- if (throttling_logging_interval > 3600)
- return -EINVAL;
-
- if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
- /*
- * Reset the ratelimit timer internals.
- * This can effectively restart the timer.
- */
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
- atomic_set(&adev->throttling_logging_enabled, 1);
- } else {
- atomic_set(&adev->throttling_logging_enabled, 0);
- }
-
- return count;
-}
-
-static struct amdgpu_device_attr amdgpu_device_attrs[] = {
- AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
-};
-
-static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states)
-{
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *attr_name = dev_attr->attr.name;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- enum amd_asic_type asic_type = adev->asic_type;
-
- if (!(attr->flags & mask)) {
- *states = ATTR_STATE_UNSUPPORTED;
- return 0;
- }
-
-#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
-
- if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (asic_type < CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (asic_type < CHIP_VEGA20)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
- if (asic_type == CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
- *states = ATTR_STATE_UNSUPPORTED;
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- *states = ATTR_STATE_SUPPORTED;
- } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pcie_bw)) {
- /* PCIe Perf counters won't work on APU nodes */
- if (adev->flags & AMD_IS_APU)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(unique_id)) {
- if (asic_type != CHIP_VEGA10 &&
- asic_type != CHIP_VEGA20 &&
- asic_type != CHIP_ARCTURUS)
- *states = ATTR_STATE_UNSUPPORTED;
- } else if (DEVICE_ATTR_IS(pp_features)) {
- if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
- *states = ATTR_STATE_UNSUPPORTED;
- }
-
- if (asic_type == CHIP_ARCTURUS) {
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
- if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
- DEVICE_ATTR_IS(pp_dpm_socclk) ||
- DEVICE_ATTR_IS(pp_dpm_fclk)) {
- dev_attr->attr.mode &= ~S_IWUGO;
- dev_attr->store = NULL;
- }
- }
-
-#undef DEVICE_ATTR_IS
-
- return 0;
-}
-
-
-static int amdgpu_device_attr_create(struct amdgpu_device *adev,
- struct amdgpu_device_attr *attr,
- uint32_t mask, struct list_head *attr_list)
-{
- int ret = 0;
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *name = dev_attr->attr.name;
- enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
- struct amdgpu_device_attr_entry *attr_entry;
-
- int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
-
- BUG_ON(!attr);
-
- attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
-
- ret = attr_update(adev, attr, mask, &attr_states);
- if (ret) {
- dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
- name, ret);
- return ret;
- }
-
- if (attr_states == ATTR_STATE_UNSUPPORTED)
- return 0;
-
- ret = device_create_file(adev->dev, dev_attr);
-	if (ret) {
-		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
-			name, ret);
-		return ret;
-	}
-
- attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
- if (!attr_entry)
- return -ENOMEM;
-
- attr_entry->attr = attr;
- INIT_LIST_HEAD(&attr_entry->entry);
-
- list_add_tail(&attr_entry->entry, attr_list);
-
- return ret;
-}
-
-static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
-{
- struct device_attribute *dev_attr = &attr->dev_attr;
-
- device_remove_file(adev->dev, dev_attr);
-}
-
-static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
- struct list_head *attr_list);
-
-static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
- struct amdgpu_device_attr *attrs,
- uint32_t counts,
- uint32_t mask,
- struct list_head *attr_list)
-{
- int ret = 0;
- uint32_t i = 0;
-
- for (i = 0; i < counts; i++) {
- ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
- if (ret)
- goto failed;
- }
-
- return 0;
-
-failed:
- amdgpu_device_attr_remove_groups(adev, attr_list);
-
- return ret;
-}
-
-static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
- struct list_head *attr_list)
-{
- struct amdgpu_device_attr_entry *entry, *entry_tmp;
-
- if (list_empty(attr_list))
-		return;
-
- list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
- amdgpu_device_attr_remove(adev, entry->attr);
- list_del(&entry->entry);
- kfree(entry);
- }
-}
-
-static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int channel = to_sensor_dev_attr(attr)->index;
- int r, temp = 0, size = sizeof(temp);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- switch (channel) {
- case PP_TEMP_JUNCTION:
- /* get current junction temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp, &size);
- break;
- case PP_TEMP_EDGE:
- /* get current edge temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp, &size);
- break;
- case PP_TEMP_MEM:
- /* get current memory temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp, &size);
- break;
- default:
- r = -EINVAL;
- break;
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_temp;
- else
- temp = adev->pm.dpm.thermal.max_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_hotspot_temp;
- else
- temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_mem_temp;
- else
- temp = adev->pm.dpm.thermal.max_mem_crit_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int channel = to_sensor_dev_attr(attr)->index;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int channel = to_sensor_dev_attr(attr)->index;
- int temp = 0;
-
- if (channel >= PP_TEMP_MAX)
- return -EINVAL;
-
- switch (channel) {
- case PP_TEMP_JUNCTION:
- temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
- break;
- case PP_TEMP_EDGE:
- temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
- break;
- case PP_TEMP_MEM:
- temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
- break;
- }
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return sprintf(buf, "%i\n", pwm_mode);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err, ret;
- int value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, value);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 255);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
- if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pr_info("manual fan speed control should be enabled first\n");
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
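-	/* the sysfs pwm interface is 0-255; the backends below take 0-100 percent */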
- value = (value * 100) / 255;
-
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_percent(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_percent(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
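-	/* the backends report 0-100 percent; map back onto the 0-255 pwm scale */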
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &speed);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 min_rpm = 0;
- u32 size = sizeof(min_rpm);
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 max_rpm = 0;
- u32 size = sizeof(max_rpm);
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 rpm = 0;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
- else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return sprintf(buf, "%i\n", rpm);
-}
-
-static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- else
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
- if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -ENODATA;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_set_fan_speed_rpm(&adev->smu, value);
- else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
- err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
- int ret;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
-
- if (is_support_sw_smu(adev)) {
- pwm_mode = smu_get_fan_control_mode(&adev->smu);
- } else {
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
-}
-
-static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- int value;
- u32 pwm_mode;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- if (value == 0)
- pwm_mode = AMD_FAN_CTRL_AUTO;
- else if (value == 1)
- pwm_mode = AMD_FAN_CTRL_MANUAL;
- else
- return -EINVAL;
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_set_fan_control_mode(&adev->smu, pwm_mode);
- } else {
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return -EINVAL;
- }
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 vddgfx;
- int r, size = sizeof(vddgfx);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
-}
-
-static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "vddgfx\n");
-}
-
-static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 vddnb;
- int r, size = sizeof(vddnb);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- /* only APUs have vddnb */
- if (!(adev->flags & AMD_IS_APU))
- return -EINVAL;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
-}
-
-static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "vddnb\n");
-}
-
-static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 query = 0;
- int r, size = sizeof(u32);
- unsigned uw;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
-	/* get the power */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
- (void *)&query, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
- /* convert to microwatts */
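-	/* the high 24 bits carry whole watts (scaled by 1000000) and the low
-	 * byte is scaled as milliwatts (by 1000)
-	 */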
- uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
-
- return snprintf(buf, PAGE_SIZE, "%u\n", uw);
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
- ssize_t size;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t limit = 0;
- ssize_t size;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
- adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
- size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
- } else {
- size = snprintf(buf, PAGE_SIZE, "\n");
- }
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- return size;
-}
-
-static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- err = kstrtou32(buf, 10, &value);
- if (err)
- return err;
-
-	value = value / 1000000; /* convert from microwatts to watts */
-
- err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return err;
- }
-
- if (is_support_sw_smu(adev))
- err = smu_set_power_limit(&adev->smu, value);
- else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
- err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- else
- err = -EINVAL;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t sclk;
- int r, size = sizeof(sclk);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
- /* get the sclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
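-	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */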
- return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
-}
-
-static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "sclk\n");
-}
-
-static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- uint32_t mclk;
- int r, size = sizeof(mclk);
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return r;
- }
-
-	/* get the mclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk, &size);
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
-
- if (r)
- return r;
-
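-	/* as with sclk above, convert from 10 kHz units to Hz */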
- return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
-}
-
-static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "mclk\n");
-}
-
-/**
- * DOC: hwmon
- *
- * The amdgpu driver exposes the following sensor interfaces:
- *
- * - GPU temperature (via the on-die sensor)
- *
- * - GPU voltage
- *
- * - Northbridge voltage (APUs only)
- *
- * - GPU power
- *
- * - GPU fan
- *
- * - GPU gfx/compute engine clock
- *
- * - GPU memory clock (dGPU only)
- *
- * hwmon interfaces for GPU temperature:
- *
- * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
- * - temp2_input and temp3_input are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_label: temperature channel label
- * - temp2_label and temp3_label are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
- * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
- * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
- *
- * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
- * - these are supported on SOC15 dGPUs only
- *
- * hwmon interfaces for GPU voltage:
- *
- * - in0_input: the voltage on the GPU in millivolts
- *
- * - in1_input: the voltage on the Northbridge in millivolts
- *
- * hwmon interfaces for GPU power:
- *
- * - power1_average: average power used by the GPU in microWatts
- *
- * - power1_cap_min: minimum cap supported in microWatts
- *
- * - power1_cap_max: maximum cap supported in microWatts
- *
- * - power1_cap: selected power cap in microWatts
- *
- * hwmon interfaces for GPU fan:
- *
- * - pwm1: pulse width modulation fan level (0-255)
- *
- * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
- *
- * - pwm1_min: pulse width modulation fan control minimum level (0)
- *
- * - pwm1_max: pulse width modulation fan control maximum level (255)
- *
- * - fan1_min: the minimum fan speed in revolutions per minute (RPM)
- *
- * - fan1_max: the maximum fan speed in revolutions per minute (RPM)
- *
- * - fan1_input: fan speed in RPM
- *
- * - fan[1-\*]_target: the desired fan speed in revolutions per minute (RPM)
- *
- * - fan[1-\*]_enable: enable or disable the sensors (1: enable, 0: disable)
- *
- * hwmon interfaces for GPU clocks:
- *
- * - freq1_input: the gfx/compute clock in hertz
- *
- * - freq2_input: the memory clock in hertz
- *
- * You can use hwmon tools like sensors to view this information on your system.
- *
- */
-
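-/*
- * A minimal userspace sketch (an illustration only, not part of this driver)
- * of reading the edge temperature through the interfaces documented above;
- * the hwmon0 index is an assumption and varies between systems:
- *
- *	#include <stdio.h>
- *
- *	int main(void)
- *	{
- *		long temp;
- *		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
- *
- *		if (!f)
- *			return 1;
- *		if (fscanf(f, "%ld", &temp) == 1)
- *			printf("edge temperature: %.3f C\n", temp / 1000.0);
- *		fclose(f);
- *		return 0;
- *	}
- */
-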
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
-static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
-static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
-static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
-static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp2_crit.dev_attr.attr,
- &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp3_crit.dev_attr.attr,
- &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_emergency.dev_attr.attr,
- &sensor_dev_attr_temp2_emergency.dev_attr.attr,
- &sensor_dev_attr_temp3_emergency.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- &sensor_dev_attr_temp3_label.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_target.dev_attr.attr,
- &sensor_dev_attr_fan1_enable.dev_attr.attr,
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in0_label.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in1_label.dev_attr.attr,
- &sensor_dev_attr_power1_average.dev_attr.attr,
- &sensor_dev_attr_power1_cap_max.dev_attr.attr,
- &sensor_dev_attr_power1_cap_min.dev_attr.attr,
- &sensor_dev_attr_power1_cap.dev_attr.attr,
- &sensor_dev_attr_freq1_input.dev_attr.attr,
- &sensor_dev_attr_freq1_label.dev_attr.attr,
- &sensor_dev_attr_freq2_input.dev_attr.attr,
- &sensor_dev_attr_freq2_label.dev_attr.attr,
- NULL
-};
-
-static umode_t hwmon_attributes_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- umode_t effective_mode = attr->mode;
-
- /* under multi-vf mode, the hwmon attributes are all not supported */
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
- /* there is no fan under pp one vf mode */
- if (amdgpu_sriov_is_pp_one_vf(adev) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip fan attributes if fan is not present */
- if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip fan attributes on APU */
- if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- /* Skip crit temp on APU */
- if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
- return 0;
-
- /* Skip limit attributes if DPM is not enabled */
- if (!adev->pm.dpm_enabled &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
-
- if (!is_support_sw_smu(adev)) {
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
-
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
- }
-
- if (((adev->flags & AMD_IS_APU) ||
- adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
- (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
-	    attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
- return 0;
-
- if (!is_support_sw_smu(adev)) {
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
- }
-
- if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
- (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in0_label.dev_attr.attr))
- return 0;
-
- /* only APUs have vddnb */
- if (!(adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_in1_label.dev_attr.attr))
- return 0;
-
- /* no mclk on APUs */
- if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
- attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
- return 0;
-
- /* only SOC15 dGPUs support hotspot and mem temperatures */
- if (((adev->flags & AMD_IS_APU) ||
- adev->asic_type < CHIP_VEGA10) &&
- (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
- return 0;
-
- return effective_mode;
-}
-
-static const struct attribute_group hwmon_attrgroup = {
- .attrs = hwmon_attributes,
- .is_visible = hwmon_attributes_visible,
-};
-
-static const struct attribute_group *hwmon_groups[] = {
- &hwmon_attrgroup,
- NULL
-};
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
-{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
- int temp, size = sizeof(temp);
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size)) {
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
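-		/* sensor read failed: rely on the direction reported by the
-		 * last thermal interrupt instead
-		 */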
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
- mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
-}
-
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
-{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
-	bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
-
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
-
-	/* certain older asics have a separate 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
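-	/* the chain is: UVD SD -> HD -> the dedicated UVD state or performance;
-	 * thermal -> ACPI -> battery -> performance
-	 */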
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
-
- return NULL;
-}
-
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal = false;
-
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
- }
- dpm_state = adev->pm.dpm.state;
-
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
-
- if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
- }
-
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
- if (adev->powerplay.pp_funcs->display_configuration_changed)
- amdgpu_dpm_display_configuration_changed(adev);
-
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
-
- if (adev->powerplay.pp_funcs->check_state_equal) {
-		if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
- equal = false;
- }
-
- if (equal)
- return;
-
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->powerplay.pp_funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
- }
-}
-
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
-			DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
- }
- }
-}
-
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
-			DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
- enable ? "enable" : "disable", ret);
- }
-}
-
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->powerplay.pp_funcs->print_power_state == NULL)
- return;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
-}
-
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
- if (ret)
-		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
- enable ? "enable" : "disable", ret);
-}
-
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
-{
- int r;
-
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("smu firmware loading failed\n");
- return r;
- }
- *smu_version = adev->pm.fw_version;
- }
- return 0;
-}
-
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
-{
- int ret;
- uint32_t mask = 0;
-
- if (adev->pm.sysfs_initialized)
- return 0;
-
- if (adev->pm.dpm_enabled == 0)
- return 0;
-
- INIT_LIST_HEAD(&adev->pm.pm_attr_list);
-
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
- }
-
- switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
- case SRIOV_VF_MODE_ONE_VF:
- mask = ATTR_FLAG_ONEVF;
- break;
- case SRIOV_VF_MODE_MULTI_VF:
- mask = 0;
- break;
- case SRIOV_VF_MODE_BARE_METAL:
- default:
- mask = ATTR_FLAG_MASK_ALL;
- break;
- }
-
- ret = amdgpu_device_attr_create_groups(adev,
- amdgpu_device_attrs,
- ARRAY_SIZE(amdgpu_device_attrs),
- mask,
- &adev->pm.pm_attr_list);
- if (ret)
- return ret;
-
- adev->pm.sysfs_initialized = true;
-
- return 0;
-}
-
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm_enabled == 0)
- return;
-
- if (adev->pm.int_hwmon_dev)
- hwmon_device_unregister(adev->pm.int_hwmon_dev);
-
- amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
-}
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
-{
- int i = 0;
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
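-	/* wait for all rings to drain before touching the power state */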
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
-
- if (is_support_sw_smu(adev)) {
- struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
- smu_handle_task(&adev->smu,
- smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
- true);
- } else {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-				/* we have issues with mclk switching with refresh rates over 120 Hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
- }
- }
-}
-
-/*
- * Debugfs info
- */
-#if defined(CONFIG_DEBUG_FS)
-
-static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
-{
- uint32_t value;
- uint64_t value64;
- uint32_t query = 0;
- int size;
-
- /* GPU Clocks */
- size = sizeof(value);
- seq_printf(m, "GFX Clocks and Power:\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDGFX)\n", value);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDNB)\n", value);
- size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
- size = sizeof(value);
- seq_printf(m, "\n");
-
- /* GPU Temp */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
- seq_printf(m, "GPU Temperature: %u C\n", value/1000);
-
- /* GPU Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
- seq_printf(m, "GPU Load: %u %%\n", value);
- /* MEM Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
- seq_printf(m, "MEM Load: %u %%\n", value);
-
- seq_printf(m, "\n");
-
- /* SMC feature mask */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
- seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
-
- if (adev->asic_type > CHIP_VEGA20) {
- /* VCN clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCN: Disabled\n");
- } else {
- seq_printf(m, "VCN: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
- }
- }
- seq_printf(m, "\n");
- } else {
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
- }
- }
- seq_printf(m, "\n");
-
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
- }
- }
- }
-
- return 0;
-}
-
-static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
-{
- int i;
-
- for (i = 0; clocks[i].flag; i++)
- seq_printf(m, "\t%s: %s\n", clocks[i].name,
- (flags & clocks[i].flag) ? "On" : "Off");
-}
-
-static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- u32 flags = 0;
- int r;
-
- if (adev->in_gpu_reset)
- return -EPERM;
-
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(dev->dev);
- return r;
- }
-
- amdgpu_device_ip_get_clockgating_state(adev, &flags);
- seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
- amdgpu_parse_cg_state(m, flags);
- seq_printf(m, "\n");
-
- if (!adev->pm.dpm_enabled) {
- seq_printf(m, "dpm not enabled\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- return 0;
- }
-
- if (!is_support_sw_smu(adev) &&
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
- mutex_lock(&adev->pm.mutex);
-		adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
- mutex_unlock(&adev->pm.mutex);
- r = 0;
- } else {
- r = amdgpu_debugfs_pm_info_pp(m, adev);
- }
-
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
-
- return r;
-}
-
-static const struct drm_info_list amdgpu_pm_info_list[] = {
- {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
-};
-#endif
-
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
deleted file mode 100644
index d9ae2b49a402..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __AMDGPU_PM_H__
-#define __AMDGPU_PM_H__
-
-struct cg_flag_name
-{
- u32 flag;
- const char *name;
-};
-
-enum amdgpu_device_attr_flags {
- ATTR_FLAG_BASIC = (1 << 0),
- ATTR_FLAG_ONEVF = (1 << 16),
-};
-
-#define ATTR_FLAG_TYPE_MASK (0x0000ffff)
-#define ATTR_FLAG_MODE_MASK (0xffff0000)
-#define ATTR_FLAG_MASK_ALL (0xffffffff)
-
-enum amdgpu_device_attr_states {
- ATTR_STATE_UNSUPPORTED = 0,
- ATTR_STATE_SUPPORTED,
-};
-
-struct amdgpu_device_attr {
- struct device_attribute dev_attr;
- enum amdgpu_device_attr_flags flags;
- int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
- uint32_t mask, enum amdgpu_device_attr_states *states);
-
-};
-
-struct amdgpu_device_attr_entry {
- struct list_head entry;
- struct amdgpu_device_attr *attr;
-};
-
-#define to_amdgpu_device_attr(_dev_attr) \
- container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
-
-#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
- { .dev_attr = __ATTR(_name, _mode, _show, _store), \
- .flags = _flags, \
- ##__VA_ARGS__, }
-
-#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...) \
- __AMDGPU_DEVICE_ATTR(_name, _mode, \
- amdgpu_get_##_name, amdgpu_set_##_name, \
- _flags, ##__VA_ARGS__)
-
-#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...) \
- AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
- _flags, ##__VA_ARGS__)
-
-#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...) \
- __AMDGPU_DEVICE_ATTR(_name, S_IRUGO, \
- amdgpu_get_##_name, NULL, \
- _flags, ##__VA_ARGS__)
-
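-/*
- * For example, AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC) declares a
- * read-write attribute named pp_sclk_od whose callbacks are expected to be
- * amdgpu_get_pp_sclk_od() and amdgpu_set_pp_sclk_od().
- */
-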
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
-int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
-void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
-
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 1311d6aec5d4..69af462db34d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -226,7 +226,7 @@ static int init_pmu_by_type(struct amdgpu_device *adev,
pmu_entry->pmu.attr_groups = attr_groups;
pmu_entry->pmu_perf_type = pmu_perf_type;
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
- pmu_file_prefix, adev->ddev->primary->index);
+ pmu_file_prefix, adev_to_drm(adev)->primary->index);
ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7fe564275457..a6dbe4b83533 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -161,10 +161,12 @@ static int psp_sw_init(void *handle)
struct psp_context *psp = &adev->psp;
int ret;
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = psp_init_microcode(psp);
+ if (ret) {
+ DRM_ERROR("Failed to load psp firmware!\n");
+ return ret;
+ }
}
ret = psp_memory_training_init(psp);
@@ -178,7 +180,7 @@ static int psp_sw_init(void *handle)
return ret;
}
- if (adev->asic_type == CHIP_NAVI10) {
+ if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
ret = psp_sysfs_init(adev);
if (ret) {
return ret;
@@ -206,7 +208,8 @@ static int psp_sw_fini(void *handle)
adev->psp.ta_fw = NULL;
}
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_SIENNA_CICHLID)
psp_sysfs_fini(adev);
return 0;
@@ -219,6 +222,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
int i;
struct amdgpu_device *adev = psp->adev;
+ if (psp->adev->in_pci_err_recovery)
+ return 0;
+
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(reg_index);
if (check_changed) {
@@ -245,6 +251,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
bool ras_intr = false;
bool skip_unsupport = false;
+ if (psp->adev->in_pci_err_recovery)
+ return 0;
+
mutex_lock(&psp->mutex);
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -522,8 +531,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) ||
- (psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -930,6 +938,7 @@ static int psp_ras_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ struct ta_ras_shared_memory *ras_cmd;
/*
* TODO: bypass the loading in sriov for now
@@ -953,11 +962,20 @@ static int psp_ras_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf;
+
if (!ret) {
- psp->ras.ras_initialized = true;
psp->ras.session_id = cmd->resp.session_id;
+
+ if (!ras_cmd->ras_status)
+ psp->ras.ras_initialized = true;
+ else
+ dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
}
+ if (ret || ras_cmd->ras_status)
+ amdgpu_ras_fini(psp->adev);
+
kfree(cmd);
return ret;
@@ -1430,6 +1448,168 @@ static int psp_dtm_terminate(struct psp_context *psp)
}
// DTM end
+// RAP start
+static int psp_rap_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16 KB of memory, aligned to 4 KB, from the frame buffer
+ * (local physical memory) for the RAP TA <-> driver shared buffer.
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ return ret;
+}
+
+static int psp_rap_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_rap_ucode_size,
+ psp->rap_context.rap_shared_mc_addr,
+ PSP_RAP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->rap_context.rap_initialized = true;
+ psp->rap_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->rap_context.mutex);
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_rap_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_rap_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialization in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_rap_ucode_size ||
+ !psp->adev->psp.ta_rap_start_addr) {
+ dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->rap_context.rap_initialized) {
+ ret = psp_rap_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_rap_load(psp);
+ if (ret)
+ return ret;
+
+ ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
+ if (ret != TA_RAP_STATUS__SUCCESS) {
+ psp_rap_unload(psp);
+
+ amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ psp->rap_context.rap_initialized = false;
+
+ dev_warn(psp->adev->dev, "RAP TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int psp_rap_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->rap_context.rap_initialized)
+ return 0;
+
+ ret = psp_rap_unload(psp);
+
+ psp->rap_context.rap_initialized = false;
+
+ /* free rap shared memory */
+ amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ return ret;
+}
+
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ struct ta_rap_shared_memory *rap_cmd;
+ int ret;
+
+ if (!psp->rap_context.rap_initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
+ ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
+ return -EINVAL;
+
+ mutex_lock(&psp->rap_context.mutex);
+
+ rap_cmd = (struct ta_rap_shared_memory *)
+ psp->rap_context.rap_shared_buf;
+ memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
+
+ rap_cmd->cmd_id = ta_cmd_id;
+ rap_cmd->validation_method_id = METHOD_A;
+
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
+ if (ret) {
+ mutex_unlock(&psp->rap_context.mutex);
+ return ret;
+ }
+
+ mutex_unlock(&psp->rap_context.mutex);
+
+ return rap_cmd->rap_status;
+}
+// RAP end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -1571,6 +1751,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ *type = GFX_FW_TYPE_RLC_IRAM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
+ break;
case AMDGPU_UCODE_ID_SMC:
*type = GFX_FW_TYPE_SMU;
break;
@@ -1707,7 +1893,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
return 0;
- if (adev->in_gpu_reset && ras && ras->supported) {
+ if (amdgpu_in_reset(adev) && ras && ras->supported) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -1822,7 +2008,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
goto skip_memalloc;
}
@@ -1892,6 +2078,11 @@ skip_memalloc:
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
+
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
}
return 0;
@@ -1942,6 +2133,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
}
@@ -2000,6 +2192,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate dtm ta\n");
return ret;
}
+ ret = psp_rap_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate rap ta\n");
+ return ret;
+ }
}
ret = psp_asd_unload(psp);
@@ -2078,6 +2275,11 @@ static int psp_resume(void *handle)
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
+
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
}
mutex_unlock(&adev->firmware.mutex);
@@ -2322,6 +2524,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
+ psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
@@ -2343,6 +2546,11 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_dtm_start_addr = ucode_start_addr;
break;
+ case TA_FW_TYPE_PSP_RAP:
+ psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
+ psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->ta_rap_start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
@@ -2421,7 +2629,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t fw_ver;
int ret;
@@ -2448,7 +2656,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
void *cpu_addr;
dma_addr_t dma_addr;
int ret;
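
The psp_wait_for() hunk above adds an in_pci_err_recovery early return in front of its bounded register poll. As a rough, self-contained sketch of that polling pattern (the read callback, the delay callback, and the one-microsecond step are illustrative assumptions, not the kernel's exact interfaces):

#include <errno.h>
#include <stdint.h>

/* Bounded register poll in the style of psp_wait_for(): returns 0 once
 * (val & mask) matches reg_val, -ETIME after usec_timeout tries. */
static int poll_reg(uint32_t (*read_reg)(uint32_t), uint32_t reg,
                    uint32_t reg_val, uint32_t mask, int usec_timeout,
                    void (*delay_us)(unsigned int))
{
        int i;

        for (i = 0; i < usec_timeout; i++) {
                uint32_t val = read_reg(reg);

                if ((val & mask) == reg_val)
                        return 0;               /* condition met */
                delay_us(1);                    /* wait ~1 usec per try */
        }
        return -ETIME;                          /* timed out */
}
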
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 623888bf30cb..919d2fb7427b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -29,6 +29,7 @@
#include "psp_gfx_if.h"
#include "ta_xgmi_if.h"
#include "ta_ras_if.h"
+#include "ta_rap_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
@@ -38,6 +39,7 @@
#define PSP_TMR_SIZE 0x400000
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_RAP_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
@@ -159,6 +161,15 @@ struct psp_dtm_context {
struct mutex mutex;
};
+struct psp_rap_context {
+ bool rap_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *rap_shared_bo;
+ uint64_t rap_shared_mc_addr;
+ void *rap_shared_buf;
+ struct mutex mutex;
+};
+
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
@@ -277,11 +288,16 @@ struct psp_context
uint32_t ta_dtm_ucode_size;
uint8_t *ta_dtm_start_addr;
+ uint32_t ta_rap_ucode_version;
+ uint32_t ta_rap_ucode_size;
+ uint8_t *ta_rap_start_addr;
+
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
+ struct psp_rap_context rap_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
};
@@ -357,6 +373,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
new file mode 100644
index 000000000000..8da5356c36f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_rap.h"
+
+/**
+ * DOC: AMDGPU RAP debugfs test interface
+ *
+ * Usage:
+ * echo opcode > <debugfs_dir>/dri/xxx/rap_test
+ *
+ * opcode:
+ * currently, only opcode 2 is supported by the Linux host driver.
+ * Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and triggers L0 policy
+ * validation; see the header file ta_rap_if.h for more detail.
+ *
+ */
+static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ta_rap_shared_memory *rap_shared_mem;
+ struct ta_rap_cmd_output_data *rap_cmd_output;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t op;
+ int ret;
+
+ if (*pos || size != 2)
+ return -EINVAL;
+
+ ret = kstrtouint_from_user(buf, size, *pos, &op);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ /* make sure the gfx core is on; the RAP TA can't handle the
+ * GFX OFF case currently.
+ */
+ amdgpu_gfx_off_ctrl(adev, false);
+
+ switch (op) {
+ case 2:
+ ret = psp_rap_invoke(&adev->psp, op);
+
+ if (ret == TA_RAP_STATUS__SUCCESS) {
+ dev_info(adev->dev, "RAP L0 validate test success.\n");
+ } else {
+ rap_shared_mem = (struct ta_rap_shared_memory *)
+ adev->psp.rap_context.rap_shared_buf;
+ rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
+
+ dev_info(adev->dev, "RAP test failed, the output is:\n");
+ dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
+ rap_cmd_output->last_subsection);
+ dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
+ rap_cmd_output->num_total_validate);
+ dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
+ rap_cmd_output->num_valid);
+ dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
+ rap_cmd_output->last_validate_addr);
+ dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
+ rap_cmd_output->last_validate_val);
+ dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
+ rap_cmd_output->last_validate_val_exptd);
+ }
+ break;
+ default:
+ dev_info(adev->dev, "Unsupported op id: %d, ", op);
+ dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
+ }
+
+ amdgpu_gfx_off_ctrl(adev, true);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_rap_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_rap_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ if (!adev->psp.rap_context.rap_initialized)
+ return;
+
+ debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
+ adev, &amdgpu_rap_debugfs_ops);
+#endif
+}
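
For reference, a minimal user-space sketch of the usage described in the DOC comment above; the debugfs mount point and the card index (dri/0) are assumptions and must match the target system. Note the handler rejects writes whose size differs from two bytes:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path is an assumption: debugfs mounted at the usual place,
         * card index 0. */
        const char *path = "/sys/kernel/debug/dri/0/rap_test";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* opcode 2 == TA_CMD_RAP__VALIDATE_L0; the handler insists on
         * a write of exactly two bytes. */
        if (write(fd, "2\n", 2) != 2)
                perror("write");
        close(fd);
        return 0;
}
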
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
index 2fcc4b60153c..ec6d7632d3a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,11 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ *
*/
+#ifndef _AMDGPU_RAP_H
+#define _AMDGPU_RAP_H
-#ifndef __CIK_DPM_H__
-#define __CIK_DPM_H__
-
-extern const struct amdgpu_ip_block_version kv_smu_ip_block;
+#include "amdgpu.h"
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index bcce4c0be462..4e36551ab50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -34,6 +34,8 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+static const char *RAS_FS_NAME = "ras";
+
const char *ras_error_string[] = {
"none",
"parity",
@@ -62,13 +64,14 @@ const char *ras_block_string[] = {
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])
-#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
-#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+/* typical ECC bad page rate (1 bad page per 100 MB of VRAM) */
+#define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL)
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -367,12 +370,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct amdgpu_device *adev =
+ (struct amdgpu_device *)file_inode(f)->i_private;
int ret;
- ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+ ret = amdgpu_ras_eeprom_reset_table(
+ &(amdgpu_ras_get_context(adev)->eeprom_control));
- return ret == 1 ? size : -EIO;
+ if (ret == 1) {
+ amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
+ return size;
+ } else {
+ return -EIO;
+ }
}
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
@@ -1017,45 +1027,13 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}
-static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
+static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct attribute *attrs[] = {
- &con->features_attr.attr,
- NULL
- };
- struct bin_attribute *bin_attrs[] = {
- &con->badpages_attr,
- NULL
- };
- struct attribute_group group = {
- .name = "ras",
- .attrs = attrs,
- .bin_attrs = bin_attrs,
- };
- con->features_attr = (struct device_attribute) {
- .attr = {
- .name = "features",
- .mode = S_IRUGO,
- },
- .show = amdgpu_ras_sysfs_features_read,
- };
-
- con->badpages_attr = (struct bin_attribute) {
- .attr = {
- .name = "gpu_vram_bad_pages",
- .mode = S_IRUGO,
- },
- .size = 0,
- .private = NULL,
- .read = amdgpu_ras_sysfs_badpages_read,
- };
-
- sysfs_attr_init(attrs[0]);
- sysfs_bin_attr_init(bin_attrs[0]);
-
- return sysfs_create_group(&adev->dev->kobj, &group);
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
}
static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
@@ -1065,14 +1043,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
&con->features_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
- &con->badpages_attr,
- NULL
- };
struct attribute_group group = {
- .name = "ras",
+ .name = RAS_FS_NAME,
.attrs = attrs,
- .bin_attrs = bin_attrs,
};
sysfs_remove_group(&adev->dev->kobj, &group);
@@ -1105,7 +1078,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (sysfs_add_file_to_group(&adev->dev->kobj,
&obj->sysfs_attr.attr,
- "ras")) {
+ RAS_FS_NAME)) {
put_obj(obj);
return -EINVAL;
}
@@ -1125,7 +1098,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
sysfs_remove_file_from_group(&adev->dev->kobj,
&obj->sysfs_attr.attr,
- "ras");
+ RAS_FS_NAME);
obj->attr_inuse = 0;
put_obj(obj);
@@ -1141,6 +1114,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_sysfs_remove(adev, &obj->head);
}
+ if (amdgpu_bad_page_threshold != 0)
+ amdgpu_ras_sysfs_remove_bad_page_node(adev);
+
amdgpu_ras_sysfs_remove_feature_node(adev);
return 0;
@@ -1169,9 +1145,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
- con->dir = debugfs_create_dir("ras", minor->debugfs_root);
+ con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
adev, &amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
@@ -1187,6 +1163,13 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
*/
debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
&con->reboot);
+
+ /*
+ * User could set this not to clean up hardware's error count register
+ * of RAS IPs during ras recovery.
+ */
+ debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
+ con->dir, &con->disable_ras_err_cnt_harvest);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -1211,6 +1194,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1233,6 +1217,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
+#endif
}
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
@@ -1243,13 +1228,13 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
if (!obj || !obj->ent)
return;
- debugfs_remove(obj->ent);
obj->ent = NULL;
put_obj(obj);
}
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1257,16 +1242,49 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove_recursive(con->dir);
con->dir = NULL;
+#endif
}
/* debugfs end */
/* ras fs */
-
+static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static DEVICE_ATTR(features, S_IRUGO,
+ amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
- amdgpu_ras_sysfs_create_feature_node(adev);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute_group group = {
+ .name = RAS_FS_NAME,
+ };
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct bin_attribute *bin_attrs[] = {
+ NULL,
+ NULL,
+ };
+ int r;
+
+ /* add features entry */
+ con->features_attr = dev_attr_features;
+ group.attrs = attrs;
+ sysfs_attr_init(attrs[0]);
+
+ if (amdgpu_bad_page_threshold != 0) {
+ /* add bad_page_features entry */
+ bin_attr_gpu_vram_bad_pages.private = NULL;
+ con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ bin_attrs[0] = &con->badpages_attr;
+ group.bin_attrs = bin_attrs;
+ sysfs_bin_attr_init(bin_attrs[0]);
+ }
+
+ r = sysfs_create_group(&adev->dev->kobj, &group);
+ if (r)
+ dev_err(adev->dev, "Failed to create RAS sysfs group!");
return 0;
}
@@ -1458,6 +1476,45 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
}
}
+/* Parse RdRspStatus and WrRspStatus */
+void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ /*
+ * Only two blocks need to query the read/write
+ * RspStatus at the current state.
+ */
+ switch (info->head.block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.funcs->query_ras_error_status)
+ adev->gfx.funcs->query_ras_error_status(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ if (adev->mmhub.funcs->query_ras_error_status)
+ adev->mmhub.funcs->query_ras_error_status(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ amdgpu_ras_error_status_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1514,23 +1571,30 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *remote_adev = NULL;
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
-
- /* Build list of devices to query RAS related errors */
- if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
- device_list_handle = &hive->device_list;
- else {
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
- device_list_handle = &device_list;
- }
- list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_log_on_err_counter(remote_adev);
+ if (!ras->disable_ras_err_cnt_harvest) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ list_for_each_entry(remote_adev,
+ device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
+
+ amdgpu_put_xgmi_hive(hive);
}
if (amdgpu_device_should_recover_gpu(ras->adev))
- amdgpu_device_gpu_recover(ras->adev, 0);
+ amdgpu_device_gpu_recover(ras->adev, NULL);
atomic_set(&ras->in_recovery, 0);
}
@@ -1645,7 +1709,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
int ret = 0;
/* no bad page record, skip eeprom access */
- if (!control->num_recs)
+ if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
return ret;
bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
@@ -1699,6 +1763,47 @@ out:
return ret;
}
+static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
+ uint32_t max_length)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int tmp_threshold = amdgpu_bad_page_threshold;
+ u64 val;
+
+ /*
+ * Justification of the bad_page_cnt_threshold value in the ras structure
+ *
+ * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
+ * in eeprom, which gives two scenarios:
+ *
+ * Bad page retirement enabled:
+ * - If amdgpu_bad_page_threshold = -1,
+ * bad_page_cnt_threshold = typical value computed by formula.
+ *
+ * - When the value from the user satisfies 0 < amdgpu_bad_page_threshold <
+ * max record length in eeprom, use it directly.
+ *
+ * Bad page retirement disabled:
+ * - If amdgpu_bad_page_threshold = 0, bad page retirement is
+ * disabled, and bad_page_cnt_threshold has no effect.
+ */
+
+ if (tmp_threshold < -1)
+ tmp_threshold = -1;
+ else if (tmp_threshold > max_length)
+ tmp_threshold = max_length;
+
+ if (tmp_threshold == -1) {
+ val = adev->gmc.mc_vram_size;
+ do_div(val, RAS_BAD_PAGE_RATE);
+ con->bad_page_cnt_threshold = min(lower_32_bits(val),
+ max_length);
+ } else {
+ con->bad_page_cnt_threshold = tmp_threshold;
+ }
+}
+
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
@@ -1708,7 +1813,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
struct amdgpu_bo *bo = NULL;
int i, ret = 0;
- if (!con || !con->eh_data)
+ /* Do not reserve bad pages when amdgpu_bad_page_threshold == 0. */
+ if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
return 0;
mutex_lock(&con->recovery_lock);
@@ -1776,6 +1882,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
+ uint32_t max_eeprom_records_len = 0;
+ bool exc_err_limit = false;
int ret;
if (con)
@@ -1794,8 +1902,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
- if (ret)
+ max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
+ amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
+ /*
+ * This call fails when exc_err_limit is true or
+ * ret != 0.
+ */
+ if (exc_err_limit || ret)
goto free;
if (con->eeprom_control.num_recs) {
@@ -1819,6 +1934,15 @@ free:
out:
dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
+ /*
+ * Except for the error-threshold-exceeded case, failures in this
+ * function do not fail amdgpu driver init.
+ */
+ if (!exc_err_limit)
+ ret = 0;
+ else
+ ret = -EINVAL;
+
return ret;
}
@@ -1858,6 +1982,17 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+{
+ if (adev->asic_type != CHIP_VEGA10 &&
+ adev->asic_type != CHIP_VEGA20 &&
+ adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_SIENNA_CICHLID)
+ return 1;
+ else
+ return 0;
+}
+
/*
* check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and
@@ -1874,8 +2009,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
- (adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_ARCTURUS))
+ amdgpu_ras_check_asic_type(adev))
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
@@ -1897,6 +2031,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = amdgpu_ras_enable == 0 ?
0 : *hw_supported & amdgpu_ras_mask;
+ adev->ras_features = *supported;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1919,9 +2054,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
- if (!con->hw_supported) {
+ if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
r = 0;
- goto err_out;
+ goto release_con;
}
con->features = 0;
@@ -1932,25 +2067,25 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (adev->nbio.funcs->init_ras_controller_interrupt) {
r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
if (r)
- goto err_out;
+ goto release_con;
}
if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
if (r)
- goto err_out;
+ goto release_con;
}
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
- goto err_out;
+ goto release_con;
}
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
-err_out:
+release_con:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
@@ -1978,7 +2113,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
amdgpu_ras_request_reset_on_boot(adev,
ras_block->block);
return 0;
- } else if (adev->in_suspend || adev->in_gpu_reset) {
+ } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -1987,7 +2122,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
}
/* in resume phase, no need to create ras fs node */
- if (adev->in_suspend || adev->in_gpu_reset)
+ if (adev->in_suspend || amdgpu_in_reset(adev))
return 0;
if (ih_info->cb) {
@@ -2145,3 +2280,19 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
return false;
}
+
+bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool exc_err_limit = false;
+
+ if (con && (amdgpu_bad_page_threshold != 0))
+ amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
+ &exc_err_limit);
+
+ /*
+ * We are only interested in the variable exc_err_limit,
+ * as it indicates whether the GPU is in a bad state.
+ */
+ return exc_err_limit;
+}
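
The default threshold derived in amdgpu_ras_validate_threshold() works out to one bad page per 100 MB of VRAM, clamped to the eeprom record capacity. A small stand-alone sketch of the arithmetic follows; the VRAM size and record capacity are example assumptions, not values taken from any particular board:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t vram_size = 16ULL << 30;          /* e.g. 16 GB, an example */
        uint64_t bad_page_rate = 100ULL << 20;     /* RAS_BAD_PAGE_RATE */
        uint32_t max_records = 1000;               /* assumed eeprom capacity */
        uint32_t threshold = (uint32_t)(vram_size / bad_page_rate);

        if (threshold > max_records)
                threshold = max_records;
        /* 16 GB / 100 MB -> 163 retired pages before the GPU is flagged */
        printf("bad_page_cnt_threshold = %u\n", threshold);
        return 0;
}
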
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index b2667342cf67..6b8d7bb83bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -31,6 +31,10 @@
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
+#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1)
+#define AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV (0x1 << 2)
+
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -336,6 +340,12 @@ struct amdgpu_ras {
struct amdgpu_ras_eeprom_control eeprom_control;
bool error_query_ready;
+
+ /* bad page count threshold */
+ uint32_t bad_page_cnt_threshold;
+
+ /* disable ras error count harvest in recovery */
+ bool disable_ras_err_cnt_harvest;
};
struct ras_fs_data {
@@ -490,6 +500,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
+bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev);
+
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
@@ -500,10 +512,14 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- /* save bad page to eeprom before gpu reset,
- * i2c may be unstable in gpu reset
+ /*
+ * Save bad pages to eeprom before gpu reset; i2c may be unstable
+ * during gpu reset.
+ *
+ * Also, exclude the case where the ras recovery issuer is the
+ * eeprom page write itself.
*/
- if (in_task())
+ if (!(ras->flags & AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV) && in_task())
amdgpu_ras_reserve_bad_pages(adev);
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c0096097bbcf..0e64c39a2372 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -46,6 +46,9 @@
#define EEPROM_TABLE_HDR_VAL 0x414d4452
#define EEPROM_TABLE_VER 0x00010000
+/* Bad GPU tag 'BADG' */
+#define EEPROM_TABLE_HDR_BAD 0x42414447
+
/* Assume 2 Mbit size */
#define EEPROM_SIZE_BYTES 256000
#define EEPROM_PAGE__SIZE_BYTES 256
@@ -56,6 +59,15 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
+{
+ if ((adev->asic_type == CHIP_VEGA20) ||
+ (adev->asic_type == CHIP_ARCTURUS))
+ return true;
+
+ return false;
+}
+
static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
uint16_t *i2c_addr)
{
@@ -213,6 +225,24 @@ static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
return true;
}
+static int amdgpu_ras_eeprom_correct_header_tag(
+ struct amdgpu_ras_eeprom_control *control,
+ uint32_t header)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE];
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ memset(buff, 0, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE);
+
+ mutex_lock(&control->tbl_mutex);
+ hdr->header = header;
+ ret = __update_table_header(control, buff);
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+}
+
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
@@ -238,12 +268,14 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
}
-int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit)
{
int ret = 0;
struct amdgpu_device *adev = to_amdgpu_device(control);
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
struct i2c_msg msg = {
.addr = 0,
.flags = I2C_M_RD,
@@ -251,6 +283,11 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
+ *exceed_err_limit = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
/* Verify i2c adapter is initialized */
if (!adev->pm.smu_i2c.algo)
return -ENOENT;
@@ -279,6 +316,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
+ } else if ((hdr->header == EEPROM_TABLE_HDR_BAD) &&
+ (amdgpu_bad_page_threshold != 0)) {
+ if (ras->bad_page_cnt_threshold > control->num_recs) {
+ dev_info(adev->dev, "Using one valid bigger bad page "
+ "threshold and correcting eeprom header tag.\n");
+ ret = amdgpu_ras_eeprom_correct_header_tag(control,
+ EEPROM_TABLE_HDR_VAL);
+ } else {
+ *exceed_err_limit = true;
+ dev_err(adev->dev, "Exceeding the bad_page_threshold parameter, "
+ "disabling the GPU.\n");
+ }
} else {
DRM_INFO("Creating new EEPROM table");
@@ -375,6 +424,49 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
+int amdgpu_ras_eeprom_check_err_threshold(
+ struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char buff[EEPROM_ADDRESS_SIZE +
+ EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct i2c_msg msg = {
+ .addr = control->i2c_address,
+ .flags = I2C_M_RD,
+ .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
+ .buf = buff,
+ };
+ int ret;
+
+ *exceed_err_limit = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* read EEPROM table header */
+ mutex_lock(&control->tbl_mutex);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+ if (ret < 1) {
+ dev_err(adev->dev, "Failed to read EEPROM table header.\n");
+ goto err;
+ }
+
+ __decode_table_header_from_buff(hdr, &buff[2]);
+
+ if (hdr->header == EEPROM_TABLE_HDR_BAD) {
+ dev_warn(adev->dev, "This GPU is in BAD status.");
+ dev_warn(adev->dev, "Please retire it or setting one bigger "
+ "threshold value when reloading driver.\n");
+ *exceed_err_limit = true;
+ }
+
+err:
+ mutex_unlock(&control->tbl_mutex);
+ return 0;
+}
+
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
@@ -383,10 +475,12 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
int i, ret = 0;
struct i2c_msg *msgs, *msg;
unsigned char *buffs, *buff;
+ bool sched_ras_recovery = false;
struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
+ if (!__is_ras_eeprom_supported(adev))
return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -402,11 +496,30 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
goto free_buff;
}
+ /*
+ * If the number of saved bad pages exceeds the bad page threshold
+ * for the whole VRAM, update the table header to mark the BAD GPU
+ * tag and schedule one ras recovery after the eeprom write is done;
+ * this avoids losing the latest records.
+ *
+ * The new header will be picked up and checked during boot by ras
+ * recovery, which may interrupt the boot process to notify the user
+ * that this GPU is in a bad state, so that it can be retired for
+ * further checking.
+ */
+ if (write && (amdgpu_bad_page_threshold != 0) &&
+ ((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
+ dev_warn(adev->dev,
+ "Saved bad pages(%d) reaches threshold value(%d).\n",
+ control->num_recs + num, ras->bad_page_cnt_threshold);
+ control->tbl_hdr.header = EEPROM_TABLE_HDR_BAD;
+ sched_ras_recovery = true;
+ }
+
/* In case of overflow just start from beginning to not lose newest records */
if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
control->next_addr = EEPROM_RECORD_START;
-
/*
* TODO Currently we make an EEPROM write for each record; this creates
* internal fragmentation. Optimize the code to do a full page write of
@@ -482,6 +595,20 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
__update_tbl_checksum(control, records, num, old_hdr_byte_sum);
__update_table_header(control, buffs);
+
+ if (sched_ras_recovery) {
+ /*
+ * Before scheduling ras recovery, set the related flag
+ * first to bypass the common bad page reservation in
+ * amdgpu_ras_reset_gpu.
+ */
+ amdgpu_ras_get_context(adev)->flags |=
+ AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV;
+
+ dev_warn(adev->dev, "Conduct ras recovery due to bad "
+ "page threshold reached.\n");
+ amdgpu_ras_reset_gpu(adev);
+ }
} else if (!__validate_tbl_checksum(control, records, num)) {
DRM_WARN("EEPROM Table checksum mismatch!");
/* TODO Uncomment when EEPROM read/write is reliable */
@@ -499,6 +626,11 @@ free_buff:
return ret == num ? 0 : -EIO;
}
+inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void)
+{
+ return EEPROM_MAX_RECORD_NUM;
+}
+
/* Used for testing if bugs encountered */
#if 0
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)
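
The header-tag handling above reduces to a small policy: a GPU is treated as bad when the 'BADG' tag is present and the configured threshold no longer exceeds the saved record count. A hedged, stand-alone model of that decision (the helper name is illustrative; the constants mirror the ones added above):

#include <stdbool.h>
#include <stdint.h>

#define EEPROM_TABLE_HDR_VAL 0x414d4452u        /* 'AMDR': healthy table */
#define EEPROM_TABLE_HDR_BAD 0x42414447u        /* 'BADG': threshold hit */

/* Illustrative helper: the GPU counts as bad when the BAD tag is set
 * and the configured threshold does not exceed the saved records;
 * otherwise the driver corrects the tag back to EEPROM_TABLE_HDR_VAL. */
static bool gpu_exceeds_err_limit(uint32_t hdr, uint32_t num_recs,
                                  uint32_t bad_page_cnt_threshold)
{
        return hdr == EEPROM_TABLE_HDR_BAD &&
               bad_page_cnt_threshold <= num_recs;
}
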
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 9e7d640920fb..c7a5e5c7c61e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -76,14 +76,21 @@ struct eeprom_table_record {
unsigned char mcumc_id;
}__attribute__((__packed__));
-int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check_err_threshold(
+ struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit);
+
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num);
+inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void);
+
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
#endif // _AMDGPU_RAS_EEPROM_H
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13ea8ebc421c..15ee13c3bd9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
&ring->sched;
}
- for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
atomic_set(&ring->num_jobs[i], 0);
return 0;
@@ -420,7 +420,7 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
char name[32];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index da871d84b742..7112137689db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -243,7 +243,7 @@ struct amdgpu_ring {
bool has_compute_vm_bug;
bool no_scheduler;
- atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 60bb3e8b3118..aeaaae713c59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -168,12 +168,16 @@ struct amdgpu_rlc {
u32 save_restore_list_cntl_size_bytes;
u32 save_restore_list_gpm_size_bytes;
u32 save_restore_list_srm_size_bytes;
+ u32 rlc_iram_ucode_size_bytes;
+ u32 rlc_dram_ucode_size_bytes;
u32 *register_list_format;
u32 *register_restore;
u8 *save_restore_list_cntl;
u8 *save_restore_list_gpm;
u8 *save_restore_list_srm;
+ u8 *rlc_iram_ucode;
+ u8 *rlc_dram_ucode;
bool is_rlc_v2_1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index c799691dfa84..0da0a0d98672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -32,24 +32,32 @@
#include "amdgpu_vm.h"
-enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+int amdgpu_to_sched_priority(int amdgpu_priority,
+ enum drm_sched_priority *prio)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return DRM_SCHED_PRIORITY_HIGH_HW;
+ *prio = DRM_SCHED_PRIORITY_HIGH;
+ break;
case AMDGPU_CTX_PRIORITY_HIGH:
- return DRM_SCHED_PRIORITY_HIGH_SW;
+ *prio = DRM_SCHED_PRIORITY_HIGH;
+ break;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return DRM_SCHED_PRIORITY_NORMAL;
+ *prio = DRM_SCHED_PRIORITY_NORMAL;
+ break;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return DRM_SCHED_PRIORITY_LOW;
+ *prio = DRM_SCHED_PRIORITY_MIN;
+ break;
case AMDGPU_CTX_PRIORITY_UNSET:
- return DRM_SCHED_PRIORITY_UNSET;
+ *prio = DRM_SCHED_PRIORITY_UNSET;
+ break;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return DRM_SCHED_PRIORITY_INVALID;
+ return -EINVAL;
}
+
+ return 0;
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
@@ -115,13 +123,24 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_sched *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
enum drm_sched_priority priority;
int r;
- priority = amdgpu_to_sched_priority(args->in.priority);
- if (priority == DRM_SCHED_PRIORITY_INVALID)
+ /* First check the op, then the op's argument.
+ */
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
return -EINVAL;
+ }
+
+ r = amdgpu_to_sched_priority(args->in.priority, &priority);
+ if (r)
+ return r;
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
@@ -136,7 +155,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
priority);
break;
default:
- DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ /* Impossible.
+ */
r = -EINVAL;
break;
}
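
The reworked amdgpu_to_sched_priority() reports invalid input through an error code instead of the old DRM_SCHED_PRIORITY_INVALID sentinel. A self-contained model of the new contract follows; the AMDGPU_CTX_PRIORITY_* values and the reduced enum are redeclared locally for illustration and are assumptions, not the uapi header itself:

#include <errno.h>
#include <stdio.h>

/* Context-priority values in the style of include/uapi/drm/amdgpu_drm.h;
 * redeclared here as assumptions for a stand-alone build. */
#define AMDGPU_CTX_PRIORITY_UNSET      -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW   -1023
#define AMDGPU_CTX_PRIORITY_LOW        -512
#define AMDGPU_CTX_PRIORITY_NORMAL     0
#define AMDGPU_CTX_PRIORITY_HIGH       512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH  1023

enum sched_priority { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_UNSET };

static int to_sched_priority(int p, enum sched_priority *out)
{
        switch (p) {
        case AMDGPU_CTX_PRIORITY_VERY_HIGH:
        case AMDGPU_CTX_PRIORITY_HIGH:     *out = PRIO_HIGH;   return 0;
        case AMDGPU_CTX_PRIORITY_NORMAL:   *out = PRIO_NORMAL; return 0;
        case AMDGPU_CTX_PRIORITY_LOW:
        case AMDGPU_CTX_PRIORITY_VERY_LOW: *out = PRIO_MIN;    return 0;
        case AMDGPU_CTX_PRIORITY_UNSET:    *out = PRIO_UNSET;  return 0;
        default:                           return -EINVAL; /* new contract */
        }
}

int main(void)
{
        enum sched_priority prio;

        /* An out-of-range value now yields -EINVAL (-22) to the caller. */
        printf("%d\n", to_sched_priority(42, &prio));
        return 0;
}
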
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 12299fd95691..67e5b2472f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -30,7 +30,8 @@ enum drm_sched_priority;
struct drm_device;
struct drm_file;
-enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_to_sched_priority(int amdgpu_priority,
+ enum drm_sched_priority *prio);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 63e734a125fb..ee9480d14cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -35,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -321,6 +321,49 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TP_ARGS(mapping)
);
+TRACE_EVENT(amdgpu_vm_update_ptes,
+ TP_PROTO(struct amdgpu_vm_update_params *p,
+ uint64_t start, uint64_t end,
+ unsigned int nptes, uint64_t dst,
+ uint64_t incr, uint64_t flags,
+ pid_t pid, uint64_t vm_ctx),
+ TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
+ TP_STRUCT__entry(
+ __field(u64, start)
+ __field(u64, end)
+ __field(u64, flags)
+ __field(unsigned int, nptes)
+ __field(u64, incr)
+ __field(pid_t, pid)
+ __field(u64, vm_ctx)
+ __dynamic_array(u64, dst, nptes)
+ ),
+
+ TP_fast_assign(
+ unsigned int i;
+
+ __entry->start = start;
+ __entry->end = end;
+ __entry->flags = flags;
+ __entry->incr = incr;
+ __entry->nptes = nptes;
+ __entry->pid = pid;
+ __entry->vm_ctx = vm_ctx;
+ for (i = 0; i < nptes; ++i) {
+ u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
+ p->pages_addr, dst) : dst;
+
+ ((u64 *)__get_dynamic_array(dst))[i] = addr;
+ dst += incr;
+ }
+ ),
+ TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
+ " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+ __entry->vm_ctx, __entry->start, __entry->end,
+ __entry->flags, __entry->incr, __print_array(
+ __get_dynamic_array(dst), __entry->nptes, 8))
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags, bool direct),
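
The new amdgpu_vm_update_ptes event records one destination address per PTE into a dynamic array, stepping by incr on each iteration. A plain C model of that capture loop, with example values standing in for a real page-table update (the amdgpu_vm_map_gart() lookup is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dst = 0x100000, incr = 4096;   /* example values */
        unsigned int nptes = 4, i;
        uint64_t out[4];

        /* One recorded destination per PTE, advancing by incr, as in
         * the TP_fast_assign() loop above. */
        for (i = 0; i < nptes; ++i) {
                out[i] = dst;
                dst += incr;
        }
        for (i = 0; i < nptes; ++i)
                printf("0x%010llx\n", (unsigned long long)out[i]);
        return 0;
}
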
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e11c5d69843d..8039d2399584 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -63,61 +63,16 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem);
-/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
- * memory request.
- *
- * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
- * @type: The type of memory requested
- * @man: The memory type manager for each domain
- *
- * This is called by ttm_bo_init_mm() when a buffer object is being
- * initialized.
- */
-static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
+static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
+ unsigned int type,
+ uint64_t size)
{
- struct amdgpu_device *adev;
-
- adev = amdgpu_ttm_adev(bdev);
-
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- /* GTT memory */
- man->func = &amdgpu_gtt_mgr_func;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- break;
- case TTM_PL_VRAM:
- /* "On-card" video ram */
- man->func = &amdgpu_vram_mgr_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- case AMDGPU_PL_GDS:
- case AMDGPU_PL_GWS:
- case AMDGPU_PL_OA:
- /* On-chip GDS memory*/
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
+ return ttm_range_man_init(&adev->mman.bdev, type,
+ false, size >> PAGE_SHIFT);
}
/**
@@ -136,7 +91,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_MASK_CACHING
};
/* Don't handle scatter gather BOs */
@@ -223,24 +179,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
/**
- * amdgpu_move_null - Register memory for a buffer object
- *
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-/**
* amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
*
* @bo: The bo to assign the memory to.
@@ -250,7 +188,7 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
uint64_t addr = 0;
@@ -270,7 +208,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
* @offset: The offset that drm_mm_node is used for finding.
*
*/
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
uint64_t *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
@@ -298,7 +236,7 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
* the physical address for local memory.
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct drm_mm_node *mm_node,
unsigned num_pages, uint64_t offset,
unsigned window, struct amdgpu_ring *ring,
@@ -521,9 +459,9 @@ error:
* help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict,
+ struct ttm_resource *new_mem,
+ struct ttm_resource *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
@@ -562,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
/* Always block for VM page tables before committing the new location */
if (bo->type == ttm_bo_type_kernel)
- r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
else
- r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
dma_fence_put(fence);
return r;
@@ -582,10 +520,10 @@ error:
*/
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
@@ -599,7 +537,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
@@ -612,14 +551,18 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
/* Bind the memory to the GTT space */
- r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -627,7 +570,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
/* move BO (in tmp_mem) to new_mem */
r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
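/*
 * Illustrative sketch: ttm_tt_bind() used to populate and bind in one
 * step; the hunk above open-codes that as two explicit calls so the
 * driver controls both halves:
 */
static int example_populate_then_bind(struct ttm_buffer_object *bo,
				      struct ttm_operation_ctx *ctx,
				      struct ttm_resource *tmp_mem)
{
	int r;

	/* 1) allocate the backing pages for the TT object */
	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
	if (unlikely(r))
		return r;

	/* 2) bind the pages into the GART at tmp_mem's offset */
	return amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, tmp_mem);
}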
@@ -638,10 +581,10 @@ out_cleanup:
*/
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -655,7 +598,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -669,12 +613,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -684,7 +628,7 @@ out_cleanup:
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct drm_mm_node *nodes = mem->mm_node;
@@ -694,7 +638,7 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
if (mem->mem_type != TTM_PL_VRAM)
return false;
- /* ttm_mem_reg_ioremap only supports contiguous memory */
+ /* ttm_resource_ioremap only supports contiguous memory */
if (nodes->size != mem->num_pages)
return false;
@@ -709,11 +653,11 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
*/
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
int r;
/* Can't move a pinned BO */
@@ -724,7 +668,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
adev = amdgpu_ttm_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT &&
@@ -732,7 +676,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
/* bind is enough */
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -742,7 +686,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA) {
/* Nothing to save here */
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
@@ -758,7 +702,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == TTM_PL_VRAM) {
r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+ r = amdgpu_move_blit(bo, evict,
new_mem, old_mem);
}
@@ -795,19 +739,12 @@ memcpy:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
@@ -817,18 +754,18 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
- if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
+ if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
/* Only physically contiguous buffers apply. In a contiguous
* buffer, size of the first mm_node would match the number of
- * pages in ttm_mem_reg.
+ * pages in ttm_resource.
*/
if (adev->mman.aper_base_kaddr &&
(mm_node->size == mem->num_pages))
mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
mem->bus.offset;
- mem->bus.base = adev->gmc.aper_base;
+ mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
break;
default:
@@ -840,12 +777,13 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
uint64_t offset = (page_offset << PAGE_SHIFT);
struct drm_mm_node *mm;
mm = amdgpu_find_mm_node(&bo->mem, &offset);
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
- (offset >> PAGE_SHIFT);
+ offset += adev->gmc.aper_base;
+ return mm->start + (offset >> PAGE_SHIFT);
}
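/*
 * Note on the address math above: ttm_resource drops the separate
 * bus.base field, so the CPU-visible PFN is now assembled directly as
 *
 *   pfn = mm->start + ((aper_base + offset_in_bo) >> PAGE_SHIFT)
 *
 * which matches the old (bus.base >> PAGE_SHIFT) + mm->start +
 * (offset >> PAGE_SHIFT) as long as aper_base is page aligned, a safe
 * assumption for a PCI BAR.
 */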
/**
@@ -879,6 +817,7 @@ struct amdgpu_ttm_tt {
uint64_t userptr;
struct task_struct *usertask;
uint32_t userflags;
+ bool bound;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
struct hmm_range *range;
#endif
@@ -1046,9 +985,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
@@ -1076,15 +1016,17 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
release_sg:
kfree(ttm->sg);
+ ttm->sg = NULL;
return r;
}
/**
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@ -1165,16 +1107,23 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void*)ttm;
uint64_t flags;
int r = 0;
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
- r = amdgpu_ttm_tt_pin_userptr(ttm);
+ r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
DRM_ERROR("failed to pin userptr\n");
return r;
@@ -1206,18 +1155,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
+ gtt->bound = true;
return r;
}
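/*
 * Illustrative sketch of the idempotence added above: with the bind and
 * unbind hooks moved into ttm_bo_driver, TTM may invoke them more than
 * once per TT, so the driver tracks the binding state itself:
 */
static int example_bind_once(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			     struct ttm_resource *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (!bo_mem)
		return -EINVAL;
	if (gtt->bound)		/* a second bind is a no-op */
		return 0;

	/* ... program the GART as amdgpu_ttm_backend_bind() does ... */
	gtt->bound = true;	/* cleared again in backend_unbind() */
	return 0;
}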
/**
- * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
+ * through AGP or GART aperture.
+ *
+ * If the BO is accessible through the AGP aperture, use the AGP
+ * aperture to access it; otherwise allocate logical space in the
+ * GART aperture and map the BO there.
*/
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
- struct ttm_mem_reg tmp;
+ struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
uint64_t addr, flags;
@@ -1240,8 +1195,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
- TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->mem.placement;
r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
if (unlikely(r))
@@ -1254,11 +1209,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
- ttm_bo_mem_put(bo, &tmp);
+ ttm_resource_free(bo, &tmp);
return r;
}
- ttm_bo_mem_put(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->mem);
bo->mem = tmp;
}
@@ -1292,15 +1247,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (!gtt->bound)
+ return;
+
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
- amdgpu_ttm_tt_unpin_userptr(ttm);
+ amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return;
@@ -1310,12 +1269,16 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
+ gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ amdgpu_ttm_backend_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1323,12 +1286,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
kfree(gtt);
}
-static struct ttm_backend_func amdgpu_backend_func = {
- .bind = &amdgpu_ttm_backend_bind,
- .unbind = &amdgpu_ttm_backend_unbind,
- .destroy = &amdgpu_ttm_backend_destroy,
-};
-
/**
* amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
*
@@ -1345,7 +1302,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
if (gtt == NULL) {
return NULL;
}
- gtt->ttm.ttm.func = &amdgpu_backend_func;
gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
@@ -1362,10 +1318,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1375,7 +1332,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
@@ -1395,7 +1352,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
@@ -1416,7 +1373,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
@@ -1440,7 +1397,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
- adev = amdgpu_ttm_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(bdev);
#ifdef CONFIG_SWIOTLB
if (adev->need_swiotlb && swiotlb_nr_tbl()) {
@@ -1457,21 +1414,26 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
* amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
* task
*
- * @ttm: The ttm_tt object to bind this userptr object to
+ * @bo: The ttm_buffer_object to bind this userptr to
* @addr: The address in the current tasks VM space to use
* @flags: Requirements of userptr object.
*
* Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
* to current task
*/
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags)
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt;
- if (gtt == NULL)
- return -EINVAL;
+ if (!bo->ttm) {
+ /* TODO: We want a separate TTM object type for userptrs */
+ bo->ttm = amdgpu_ttm_tt_create(bo, 0);
+ if (bo->ttm == NULL)
+ return -ENOMEM;
+ }
+ gtt = (void*)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1557,7 +1519,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
*
* Figure out the flags to use for a VM PDE (Page Directory Entry).
*/
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
uint64_t flags = 0;
@@ -1583,7 +1545,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
* Figure out the flags to use for a VM PTE (Page Table Entry).
*/
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
@@ -1741,7 +1703,9 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .init_mem_type = &amdgpu_init_mem_type,
+ .ttm_tt_bind = &amdgpu_ttm_backend_bind,
+ .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
+ .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
@@ -1767,8 +1731,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
*/
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
+ amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
+ NULL, &adev->mman.fw_vram_usage_va);
}
/**
@@ -1782,19 +1746,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
uint64_t vram_size = adev->gmc.visible_vram_size;
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
+ adev->mman.fw_vram_usage_va = NULL;
+ adev->mman.fw_vram_usage_reserved_bo = NULL;
- if (adev->fw_vram_usage.size == 0 ||
- adev->fw_vram_usage.size > vram_size)
+ if (adev->mman.fw_vram_usage_size == 0 ||
+ adev->mman.fw_vram_usage_size > vram_size)
return 0;
return amdgpu_bo_create_kernel_at(adev,
- adev->fw_vram_usage.start_offset,
- adev->fw_vram_usage.size,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM,
- &adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
+ &adev->mman.fw_vram_usage_reserved_bo,
+ &adev->mman.fw_vram_usage_va);
}
/*
@@ -1826,7 +1790,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset =
- ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M);
+ ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
@@ -1865,10 +1829,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
* Otherwise, fall back to the legacy approach to check and reserve the TMR
* block for IP discovery data and G6 memory training data respectively
*/
- adev->discovery_tmr_size =
+ adev->mman.discovery_tmr_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
- if (!adev->discovery_tmr_size)
- adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET;
+ if (!adev->mman.discovery_tmr_size)
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
if (mem_train_support) {
/* reserve vram for mem train according to TMR location */
@@ -1888,14 +1852,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->discovery_tmr_size,
- adev->discovery_tmr_size,
+ adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+ adev->mman.discovery_tmr_size,
AMDGPU_GEM_DOMAIN_VRAM,
- &adev->discovery_memory,
+ &adev->mman.discovery_memory,
NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
return ret;
}
@@ -1916,15 +1880,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
uint64_t gtt_size;
int r;
u64 vis_vram_limit;
- void *stolen_vga_buf;
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
- adev->ddev->anon_inode->i_mapping,
- adev->ddev->vma_offset_manager,
+ adev_to_drm(adev)->anon_inode->i_mapping,
+ adev_to_drm(adev)->vma_offset_manager,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1936,8 +1899,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->mman.bdev.no_retry = true;
/* Initialize VRAM pool with all of VRAM divided into pages */
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
- adev->gmc.real_vram_size >> PAGE_SHIFT);
+ r = amdgpu_vram_mgr_init(adev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
@@ -1970,7 +1932,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* If IP discovery is enabled, a block of memory should be
* reserved for IP discovery.
*/
- if (adev->discovery_bin) {
+ if (adev->mman.discovery_bin) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -1980,10 +1942,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
* and driver. */
- r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->stolen_vga_memory,
- NULL, &stolen_vga_buf);
+ r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_vga_memory,
+ NULL);
+ if (r)
+ return r;
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+ adev->mman.stolen_extended_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_extended_memory,
+ NULL);
if (r)
return r;
@@ -2004,7 +1973,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
/* Initialize GTT memory pool */
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+ r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -2013,22 +1982,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
(unsigned)(gtt_size / (1024 * 1024)));
/* Initialize various on-chip memory pools */
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
- adev->gds.gds_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
DRM_ERROR("Failed initializing GDS heap.\n");
return r;
}
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
- adev->gds.gws_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
DRM_ERROR("Failed initializing gws heap.\n");
return r;
}
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
- adev->gds.oa_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
DRM_ERROR("Failed initializing oa heap.\n");
return r;
@@ -2042,9 +2008,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
- void *stolen_vga_buf;
/* return the VGA stolen memory (if any) back to VRAM */
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
}
/**
@@ -2056,19 +2023,22 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the stolen vga memory back to VRAM */
+ if (adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
- amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+ amdgpu_vram_mgr_fini(adev);
+ amdgpu_gtt_mgr_fini(adev);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
@@ -2085,11 +2055,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
*/
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
- struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
uint64_t size;
int r;
- if (!adev->mman.initialized || adev->in_gpu_reset ||
+ if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
adev->mman.buffer_funcs_enabled == enable)
return;
@@ -2100,7 +2070,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
r = drm_sched_entity_init(&adev->mman.entity,
- DRM_SCHED_PRIORITY_KERNEL, &sched,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@@ -2125,7 +2095,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
- struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
if (adev == NULL)
return -EINVAL;
@@ -2306,8 +2276,8 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = (uintptr_t)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
@@ -2597,7 +2567,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
unsigned count;
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 17c8d0d7bcc3..a87951b2f06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -32,15 +32,26 @@
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
-#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
-
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
#define AMDGPU_POISON 0xd0bed0be
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+ atomic64_t usage;
+ atomic64_t vis_usage;
+};
+
+struct amdgpu_gtt_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+ atomic64_t available;
+};
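/*
 * Illustrative sketch: these managers are now embedded in amdgpu_mman
 * instead of being allocated by TTM, so lookups go through
 * ttm_manager_type() and back to the wrapper via container_of().
 * The accessor name below is hypothetical:
 */
static inline struct amdgpu_vram_mgr *
example_to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

/* usage:
 *   struct ttm_resource_manager *man =
 *		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 *   struct amdgpu_vram_mgr *mgr = example_to_vram_mgr(man);
 */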
+
struct amdgpu_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
@@ -59,24 +70,46 @@ struct amdgpu_mman {
struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
struct drm_sched_entity entity;
+
+ struct amdgpu_vram_mgr vram_mgr;
+ struct amdgpu_gtt_mgr gtt_mgr;
+
+ uint64_t stolen_vga_size;
+ struct amdgpu_bo *stolen_vga_memory;
+ uint64_t stolen_extended_size;
+ struct amdgpu_bo *stolen_extended_memory;
+ bool keep_stolen_vga_memory;
+
+ /* discovery */
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ struct amdgpu_bo *discovery_memory;
+
+ /* firmware VRAM reservation */
+ u64 fw_vram_usage_start_offset;
+ u64 fw_vram_usage_size;
+ struct amdgpu_bo *fw_vram_usage_reserved_bo;
+ void *fw_vram_usage_va;
};
struct amdgpu_copy_mem {
struct ttm_buffer_object *bo;
- struct ttm_mem_reg *mem;
+ struct ttm_resource *mem;
unsigned long offset;
};
-extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
-extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
@@ -84,8 +117,8 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
@@ -130,8 +163,8 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
#endif
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -140,9 +173,9 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 183743c5fb7b..b313ce4c3e97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -408,7 +408,7 @@ static ssize_t show_##name(struct device *dev, \
char *buf) \
{ \
struct drm_device *ddev = dev_get_drvdata(dev); \
- struct amdgpu_device *adev = ddev->dev_private; \
+ struct amdgpu_device *adev = drm_to_adev(ddev); \
\
return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \
} \
@@ -500,6 +500,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
@@ -556,6 +558,14 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
+ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
+ ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
+ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
+ ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
@@ -628,7 +638,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
struct amdgpu_firmware_info *ucode = NULL;
/* for bare metal, the ucode is allocated in GTT, so there is no need to fill the bo when reset/suspend */
- if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
return 0;
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 12a8bc8fca0b..0e43b46d3ab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -131,6 +131,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_RAS,
TA_FW_TYPE_PSP_HDCP,
TA_FW_TYPE_PSP_DTM,
+ TA_FW_TYPE_PSP_RAP,
};
struct ta_fw_bin_desc {
@@ -221,6 +222,15 @@ struct rlc_firmware_header_v2_1 {
uint32_t save_restore_list_srm_offset_bytes;
};
+/* version_major=2, version_minor=2 */
+struct rlc_firmware_header_v2_2 {
+ struct rlc_firmware_header_v2_1 v2_1;
+ uint32_t rlc_iram_ucode_size_bytes;
+ uint32_t rlc_iram_ucode_offset_bytes;
+ uint32_t rlc_dram_ucode_size_bytes;
+ uint32_t rlc_dram_ucode_offset_bytes;
+};
+
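/*
 * Illustrative sketch, inferred from the AMDGPU_UCODE_ID_RLC_IRAM/_DRAM
 * hunks in amdgpu_ucode.c above; the exact parsing code is not part of
 * this diff.  The v2_2 offset/size pairs locate the IRAM and DRAM blobs
 * inside the RLC firmware image:
 */
static void example_parse_rlc_v2_2(struct amdgpu_device *adev,
				   const struct firmware *fw)
{
	const struct rlc_firmware_header_v2_2 *hdr =
		(const struct rlc_firmware_header_v2_2 *)fw->data;

	adev->gfx.rlc.rlc_iram_ucode_size_bytes =
		le32_to_cpu(hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)fw->data +
		le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes =
		le32_to_cpu(hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)fw->data +
		le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes);
}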
/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -338,6 +348,8 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_IRAM,
+ AMDGPU_UCODE_ID_RLC_DRAM,
AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index af1b1ccf613c..262baf0f61ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -125,8 +125,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
"detected in UMC block\n",
err_data->ue_count);
- if (err_data->err_addr_cnt &&
- amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ if ((amdgpu_bad_page_threshold != 0) &&
+ err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt))
dev_warn(adev->dev, "Failed to add ras bad page!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a615a1eb750b..183814493658 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,6 +21,20 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
+/*
+ * (addr / 256) * 8192: the upper 26 bits of ErrorAddr
+ * are the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
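/*
 * Worked example for the macros above: ADDR_OF_8KB_BLOCK() implements
 * (addr / 256) * 8192 as a mask-and-shift, since 8192 / 256 == 1 << 5.
 * For addr = 0x12345:
 *
 *   (0x12345 / 256) * 8192      = 0x123 * 0x2000 = 0x246000
 *   (0x12345 & ~0xffULL) << 5   = 0x12300 << 5   = 0x246000
 *
 * A retired page address would then be reassembled as (pattern inferred,
 * not shown in this diff):
 *
 *   ADDR_OF_8KB_BLOCK(err_addr) |
 *   ADDR_OF_256B_BLOCK(channel_index) |
 *   OFFSET_IN_256B_BLOCK(err_addr)
 */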
struct amdgpu_umc_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 495c3d7bb2b2..a563328e3dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -39,6 +39,7 @@
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
@@ -50,6 +51,7 @@ MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -68,6 +70,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
mutex_init(&adev->vcn.vcn_pg_lock);
+ mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
atomic_set(&adev->vcn.total_submission_cnt, 0);
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -88,7 +91,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
- fw_name = FIRMWARE_RENOIR;
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ fw_name = FIRMWARE_RENOIR;
+ else
+ fw_name = FIRMWARE_GREEN_SARDINE;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
@@ -237,6 +244,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
+ mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 7a9b804bc988..17691158f783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -220,6 +220,7 @@ struct amdgpu_vcn {
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
+ struct mutex vcn1_jpeg1_workaround;
atomic_t total_submission_cnt;
unsigned harvest_config;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 1203c20491e6..d0aea5e39531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -31,6 +31,12 @@
#include "soc15.h"
#include "nv.h"
+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+ do { \
+ vf2pf_info->ucode_info[ucode].id = ucode; \
+ vf2pf_info->ucode_info[ucode].version = ver; \
+ } while (0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -45,7 +51,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
if (adev->mode_info.num_crtc == 0)
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
+ adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
@@ -93,7 +99,7 @@ failed_undo:
amdgpu_ring_undo(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
- pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+ dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}
/**
@@ -239,10 +245,10 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
}
-int amdgpu_virt_fw_reserve_get_checksum(void *obj,
- unsigned long obj_size,
- unsigned int key,
- unsigned int chksum)
+unsigned int amd_sriov_msg_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int checksum)
{
unsigned int ret = key;
unsigned long i = 0;
@@ -252,9 +258,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
/* calculate checksum */
for (i = 0; i < obj_size; ++i)
ret += *(pos + i);
- /* minus the chksum itself */
- pos = (char *)&chksum;
- for (i = 0; i < sizeof(chksum); ++i)
+ /* minus the checksum itself */
+ pos = (char *)&checksum;
+ for (i = 0; i < sizeof(checksum); ++i)
ret -= *(pos + i);
return ret;
}
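/*
 * Illustrative sketch of the checksum contract: the helper sums every
 * byte of the message (folding in the key) and subtracts the bytes of
 * the stored checksum, so writer and verifier share one function:
 */
static void example_checksum_usage(struct amd_sriov_msg_vf2pf_info *info)
{
	/* writing: compute over the struct while the field is still zero */
	info->checksum = amd_sriov_msg_checksum(info, info->header.size, 0, 0);

	/* verifying: recompute, compensating for the stored field */
	if (amd_sriov_msg_checksum(info, info->header.size, 0,
				   info->checksum) != info->checksum)
		pr_err("vf2pf checksum mismatch\n");
}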
@@ -401,7 +407,7 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
if (bp_block_size) {
bp_cnt = bp_block_size / sizeof(uint64_t);
for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
- retired_page = *(uint64_t *)(adev->fw_vram_usage.va +
+ retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
bp_block_offset + bp_idx * sizeof(uint64_t));
bp.retired_page = retired_page;
@@ -415,33 +421,188 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
}
}
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
- uint32_t pf2vf_size = 0;
- uint32_t checksum = 0;
+ struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
+ uint32_t checksum;
uint32_t checkval;
- char *str;
+
+ if (adev->virt.fw_reserve.p_pf2vf == NULL)
+ return -EINVAL;
+
+ if (pf2vf_info->size > 1024) {
+ DRM_ERROR("invalid pf2vf message size\n");
+ return -EINVAL;
+ }
+
+ switch (pf2vf_info->version) {
+ case 1:
+ checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checksum != checkval) {
+ DRM_ERROR("invalid pf2vf message\n");
+ return -EINVAL;
+ }
+
+ adev->virt.gim_feature =
+ ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+ break;
+ case 2:
+ /* TODO: missing key, need to add it later */
+ checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ 0, checksum);
+ if (checksum != checkval) {
+ DRM_ERROR("invalid pf2vf message\n");
+ return -EINVAL;
+ }
+
+ adev->virt.vf2pf_update_interval_ms =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+ adev->virt.gim_feature =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+
+ break;
+ default:
+ DRM_ERROR("invalid pf2vf version\n");
+ return -EINVAL;
+ }
+
+ /* clamp interval values that are too large or too small */
+ if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+ adev->virt.vf2pf_update_interval_ms = 2000;
+
+ return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return;
+
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
+}
+
+static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return -EINVAL;
+
+ memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
+
+ vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
+ vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
+
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
+ else
+#endif
+ strcpy(vf2pf_info->driver_version, "N/A");
+
+ vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
+ vf2pf_info->driver_cert = 0;
+ vf2pf_info->os_info.all = 0;
+
+ vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
+ vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+ vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
+ vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
+
+ amdgpu_virt_populate_vf2pf_ucode_info(adev);
+
+ /* TODO: read dynamic info */
+ vf2pf_info->gfx_usage = 0;
+ vf2pf_info->compute_usage = 0;
+ vf2pf_info->encode_usage = 0;
+ vf2pf_info->decode_usage = 0;
+
+ vf2pf_info->checksum =
+ amd_sriov_msg_checksum(
+ vf2pf_info, vf2pf_info->header.size, 0, 0);
+
+ return 0;
+}
+
+void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ amdgpu_virt_write_vf2pf_data(adev);
+
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+}
+
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+{
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ DRM_INFO("clean up the vf2pf work item\n");
+ flush_delayed_work(&adev->virt.vf2pf_work);
+ cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ }
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
- struct amdgim_pf2vf_info_v2 *pf2vf_v2 = NULL;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
+ adev->virt.vf2pf_update_interval_ms = 0;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+ adev->virt.vf2pf_update_interval_ms = 2000;
- if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)(
- adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
-
- /* pf2vf message must be in 4K */
- if (pf2vf_size > 0 && pf2vf_size < 4096) {
- if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
- pf2vf_v2 = (struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf;
- bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_L & 0xFFFFFFFF) |
- ((((uint64_t)pf2vf_v2->bp_block_offset_H) << 32) & 0xFFFFFFFF00000000);
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ amdgpu_virt_write_vf2pf_data(adev);
+
+ /* bad page handling for version 2 */
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
+ pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+
+ bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+ ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
bp_block_size = pf2vf_v2->bp_block_size;
if (bp_block_size && !adev->virt.ras_init_done)
@@ -450,37 +611,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
+ }
- checkval = amdgpu_virt_fw_reserve_get_checksum(
- adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
- adev->virt.fw_reserve.checksum_key, checksum);
- if (checkval == checksum) {
- adev->virt.fw_reserve.p_vf2pf =
- ((void *)adev->virt.fw_reserve.p_pf2vf +
- pf2vf_size);
- memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
- sizeof(amdgim_vf2pf_info));
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
- AMDGPU_FW_VRAM_VF2PF_VER);
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
- sizeof(amdgim_vf2pf_info));
- AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
- &str);
-#ifdef MODULE
- if (THIS_MODULE->version != NULL)
- strcpy(str, THIS_MODULE->version);
- else
-#endif
- strcpy(str, "N/A");
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
- 0);
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
- amdgpu_virt_fw_reserve_get_checksum(
- adev->virt.fw_reserve.p_vf2pf,
- pf2vf_size,
- adev->virt.fw_reserve.checksum_key, 0));
- }
- }
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f826945989c7..8dd624c20f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -24,6 +24,8 @@
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H
+#include "amdgv_sriovmsg.h"
+
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
@@ -79,7 +81,10 @@ struct amdgpu_virt_fw_reserve {
struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
+
/*
+ * Legacy GIM header
+ *
* Definition between PF and VF
* Structures forcibly aligned to 4 to keep the same style as PF.
*/
@@ -101,15 +106,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
-struct amd_sriov_msg_pf2vf_info_header {
- /* the total structure size in byte. */
- uint32_t size;
- /* version of this structure, written by the GIM */
- uint32_t version;
- /* reserved */
- uint32_t reserved[2];
-} __aligned(4);
-struct amdgim_pf2vf_info_v1 {
+struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
@@ -128,54 +125,6 @@ struct amdgim_pf2vf_info_v1 {
unsigned int checksum;
} __aligned(4);
-struct amdgim_pf2vf_info_v2 {
- /* header contains size and version */
- struct amd_sriov_msg_pf2vf_info_header header;
- /* use private key from mailbox 2 to create chueksum */
- uint32_t checksum;
- /* The features flags of the GIM driver supports. */
- uint32_t feature_flags;
- /* max_width * max_height */
- uint32_t uvd_enc_max_pixels_count;
- /* 16x16 pixels/sec, codec independent */
- uint32_t uvd_enc_max_bandwidth;
- /* max_width * max_height */
- uint32_t vce_enc_max_pixels_count;
- /* 16x16 pixels/sec, codec independent */
- uint32_t vce_enc_max_bandwidth;
- /* Bad pages block position in BYTE */
- uint32_t bp_block_offset_L;
- uint32_t bp_block_offset_H;
- /* Bad pages block size in BYTE */
- uint32_t bp_block_size;
- /* MEC FW position in kb from the start of VF visible frame buffer */
- uint32_t mecfw_kboffset_L;
- uint32_t mecfw_kboffset_H;
- /* MEC FW size in KB */
- uint32_t mecfw_ksize;
- /* UVD FW position in kb from the start of VF visible frame buffer */
- uint32_t uvdfw_kboffset_L;
- uint32_t uvdfw_kboffset_H;
- /* UVD FW size in KB */
- uint32_t uvdfw_ksize;
- /* VCE FW position in kb from the start of VF visible frame buffer */
- uint32_t vcefw_kboffset_L;
- uint32_t vcefw_kboffset_H;
- /* VCE FW size in KB */
- uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (18 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 0)];
-} __aligned(4);
-
-
-struct amd_sriov_msg_vf2pf_info_header {
- /* the total structure size in byte. */
- uint32_t size;
- /*version of this structure, written by the guest */
- uint32_t version;
- /* reserved */
- uint32_t reserved[2];
-} __aligned(4);
-
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
struct amd_sriov_msg_vf2pf_info_header header;
@@ -237,31 +186,6 @@ struct amdgim_vf2pf_info_v2 {
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
-#define AMDGPU_FW_VRAM_VF2PF_VER 2
-typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
-
-#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
- do { \
- ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
- } while (0)
-
-#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
- do { \
- (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
- } while (0)
-
-#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
- do { \
- if (!adev->virt.fw_reserve.p_pf2vf) \
- *(val) = 0; \
- else { \
- if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
- *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
- if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
- *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
- } \
- } while (0)
-
struct amdgpu_virt_ras_err_handler_data {
/* point to bad page records array */
struct eeprom_table_record *bps;
@@ -285,7 +209,7 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
- struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
uint32_t reg_access_mode;
@@ -293,6 +217,10 @@ struct amdgpu_virt {
bool tdr_debug;
struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
bool ras_init_done;
+
+ /* vf2pf message */
+ struct delayed_work vf2pf_work;
+ uint32_t vf2pf_update_interval_ms;
};
#define amdgpu_sriov_enabled(adev) \
@@ -325,9 +253,9 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_is_debug(adev) \
- ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+ ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
- ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
+ ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -341,11 +269,9 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
-int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
- unsigned int key,
- unsigned int chksum);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 71e005cf2952..df110afa97bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -28,6 +28,7 @@
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -35,6 +36,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: GPUVM
@@ -1500,6 +1502,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
pt = cursor.entry->base.bo;
shift = parent_shift;
+ frag_end = max(frag_end, ALIGN(frag_start + 1,
+ 1ULL << shift));
}
/* Looks good so far, calculate parameters for the update */
@@ -1511,19 +1515,26 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
entry_end = min(entry_end, end);
do {
+ struct amdgpu_vm *vm = params->vm;
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
/* This can happen when we set higher level PDs to
* silent to stop fault floods.
*/
nptes = max(nptes, 1u);
+
+ trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+ nptes, dst, incr, upd_flags,
+ vm->task_info.pid,
+ vm->immediate.fence_context);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
- flags | AMDGPU_PTE_FRAG(frag));
+ upd_flags);
pe_start += nptes * 8;
- dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += nptes * incr;
frag_start = upd_end;
if (frag_start >= frag_end) {
@@ -1691,13 +1702,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t max_entries;
uint64_t addr, last;
+ max_entries = mapping->last - start + 1;
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
- max_entries = (nodes->size - pfn) *
- AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = min((nodes->size - pfn) *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
} else {
addr = 0;
- max_entries = S64_MAX;
}
if (pages_addr) {
@@ -1727,7 +1738,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += pfn << PAGE_SHIFT;
}
- last = min((uint64_t)mapping->last, start + max_entries - 1);
+ last = start + max_entries - 1;
r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
start, last, flags, addr,
dma_addr, fence);
@@ -1765,7 +1776,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- struct ttm_mem_reg *mem;
+ struct ttm_resource *mem;
struct drm_mm_node *nodes;
struct dma_fence **last_update;
struct dma_resv *resv;
@@ -1778,15 +1789,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
nodes = NULL;
resv = vm->root.base.bo->tbo.base.resv;
} else {
+ struct drm_gem_object *obj = &bo->tbo.base;
struct ttm_dma_tt *ttm;
+ resv = bo->tbo.base.resv;
+ if (obj->import_attach && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo = gem_to_amdgpu_bo(gobj);
+ }
mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- resv = bo->tbo.base.resv;
}
if (bo) {
@@ -2132,8 +2152,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
- (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+ if (!bo)
+ return bo_va;
+
+ if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
bo_va->is_xgmi = true;
/* Power up XGMI if it can be potentially used */
amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
@@ -2785,7 +2807,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context, unsigned int pasid)
+ int vm_context, u32 pasid)
{
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
@@ -2956,7 +2978,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* 0 for success, -errno for errors.
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned int pasid)
+ u32 pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -3209,7 +3231,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
long timeout = msecs_to_jiffies(2000);
int r;
@@ -3254,7 +3276,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* @pasid: PASID identifier for VM
* @task_info: task_info to fill.
*/
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
@@ -3298,7 +3320,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* Try to gracefully handle a VM fault. Return true if the fault was handled and
* shouldn't be reported any more.
*/
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
uint64_t addr)
{
struct amdgpu_bo *root;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 770025a5e500..58c83a7ad0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -98,7 +98,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL)
-/* How to programm VM fault handling */
+/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
@@ -112,8 +112,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+/* Reserve 2MB at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
@@ -372,8 +372,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context, unsigned int pasid);
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
+ int vm_context, u32 pasid);
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
@@ -430,9 +430,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info);
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
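
The unsigned int -> u32 changes are a type cleanup rather than a behavioral one: a PCIe PASID is a 20-bit field, so u32 holds it with room to spare. A hypothetical bound check just to make that headroom concrete (neither name below exists in the driver):

#define PASID_WIDTH_BITS 20	/* PCIe PASID TLP prefix width; illustrative */

static inline bool pasid_in_range(u32 pasid)
{
	return pasid < (1U << PASID_WIDTH_BITS);
}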
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 39c704a1fb0e..0786e7555554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -59,7 +59,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: kmap addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 189d46ea603b..db790574dc2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -155,7 +155,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
@@ -187,7 +187,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 134cc36e30c5..0c6b7c5ecfec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,12 +28,15 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
-struct amdgpu_vram_mgr {
- struct drm_mm mm;
- spinlock_t lock;
- atomic64_t usage;
- atomic64_t vis_usage;
-};
+static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_vram_mgr, manager);
+}
+
+static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
+{
+ return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+}
/**
* DOC: mem_info_vram_total
@@ -47,7 +50,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}
@@ -64,7 +67,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}
@@ -81,10 +84,11 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+ amdgpu_vram_mgr_usage(man));
}
/**
@@ -99,10 +103,11 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+ amdgpu_vram_mgr_vis_usage(man));
}
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -110,7 +115,7 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
switch (adev->gmc.vram_vendor) {
case SAMSUNG:
@@ -158,63 +163,72 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = {
NULL
};
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
+
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of VRAM
+ * @adev: amdgpu_device pointer
*
* Allocate and initialize the VRAM manager.
*/
-static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
int ret;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
+ ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
+
+ man->func = &amdgpu_vram_mgr_func;
- drm_mm_init(&mgr->mm, 0, p_size);
+ drm_mm_init(&mgr->mm, 0, man->size);
spin_lock_init(&mgr->lock);
- man->priv = mgr;
/* Add the two VRAM-related sysfs files */
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
if (ret)
DRM_ERROR("Failed to register sysfs\n");
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
/**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the VRAM manager. Outstanding allocations are
* force-cleaned first; teardown is skipped if that fails.
*/
-static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ if (ret)
+ return;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
+
sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
- return 0;
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}
/**
* amdgpu_vram_mgr_vis_size - Calculate visible node size
*
- * @adev: amdgpu device structure
+ * @adev: amdgpu_device pointer
* @node: MM node structure
*
* Calculate how many bytes of the MM node are inside visible VRAM
@@ -243,7 +257,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct ttm_resource *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
u64 usage;
@@ -263,13 +277,13 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
/**
* amdgpu_vram_mgr_virt_start - update virtual start address
*
- * @mem: ttm_mem_reg to update
+ * @mem: ttm_resource to update
* @node: just allocated node
*
* Calculate a virtual BO start address to easily check if everything is CPU
* accessible.
*/
-static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
struct drm_mm_node *node)
{
unsigned long start;
@@ -292,13 +306,13 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
*
* Allocate VRAM for the given BO.
*/
-static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
@@ -410,11 +424,11 @@ error:
*
* Free the allocated VRAM again.
*/
-static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_mm_node *nodes = mem->mm_node;
uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages;
@@ -451,7 +465,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt)
@@ -462,7 +476,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
unsigned int pages;
int i, r;
- *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
@@ -544,9 +558,9 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
*
* Returns how many bytes are used in this domain.
*/
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
return atomic64_read(&mgr->usage);
}
@@ -558,9 +572,9 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
*
* Returns how many bytes are used in the visible part of VRAM
*/
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
return atomic64_read(&mgr->vis_usage);
}
@@ -573,10 +587,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
*
* Dump the table content using printk.
*/
-static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
@@ -587,10 +601,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
amdgpu_vram_mgr_vis_usage(man) >> 20);
}
-const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
- .init = amdgpu_vram_mgr_init,
- .takedown = amdgpu_vram_mgr_fini,
- .get_node = amdgpu_vram_mgr_new,
- .put_node = amdgpu_vram_mgr_del,
- .debug = amdgpu_vram_mgr_debug
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
+ .alloc = amdgpu_vram_mgr_new,
+ .free = amdgpu_vram_mgr_del,
+ .debug = amdgpu_vram_mgr_debug
};
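
With .init/.takedown gone from the func table, TTM no longer drives the manager's lifecycle; the driver calls the new entry points directly while setting up TTM. A sketch of the expected call sites, with the surrounding amdgpu_ttm.c bodies abbreviated and partly assumed:

/* in amdgpu_ttm_init(), once the TTM device is up */
r = amdgpu_vram_mgr_init(adev);		/* registers itself for TTM_PL_VRAM */
if (r) {
	DRM_ERROR("Failed initializing VRAM heap.\n");
	return r;
}

/* in amdgpu_ttm_fini(), before tearing the TTM device down */
amdgpu_vram_mgr_fini(adev);		/* force-cleans, then unregisters */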
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index e3a3755cb999..1162913c8bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -35,11 +35,9 @@
static DEFINE_MUTEX(xgmi_mutex);
-#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
-static unsigned hive_count = 0;
+static LIST_HEAD(xgmi_hive_list);
static const int xgmi_pcs_err_status_reg_vg20[] = {
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
@@ -171,65 +169,53 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
*
*/
+static struct attribute amdgpu_xgmi_hive_id = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO
+};
-static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct amdgpu_hive_info *hive =
- container_of(attr, struct amdgpu_hive_info, dev_attr);
-
- return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
-}
+static struct attribute *amdgpu_xgmi_hive_attrs[] = {
+ &amdgpu_xgmi_hive_id,
+ NULL
+};
-static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive)
+static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- int ret = 0;
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
- if (WARN_ON(hive->kobj))
- return -EINVAL;
-
- hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
- if (!hive->kobj) {
- dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
- return -EINVAL;
- }
-
- hive->dev_attr = (struct device_attribute) {
- .attr = {
- .name = "xgmi_hive_id",
- .mode = S_IRUGO,
-
- },
- .show = amdgpu_xgmi_show_hive_id,
- };
+ if (attr == &amdgpu_xgmi_hive_id)
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
- ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
- if (ret) {
- dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
- kobject_del(hive->kobj);
- kobject_put(hive->kobj);
- hive->kobj = NULL;
- }
-
- return ret;
+ return 0;
}
-static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive)
+static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
- sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
- kobject_del(hive->kobj);
- kobject_put(hive->kobj);
- hive->kobj = NULL;
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
+
+ mutex_destroy(&hive->hive_lock);
+ kfree(hive);
}
+static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
+ .show = amdgpu_xgmi_show_attrs,
+};
+
+struct kobj_type amdgpu_xgmi_hive_type = {
+ .release = amdgpu_xgmi_hive_release,
+ .sysfs_ops = &amdgpu_xgmi_hive_ops,
+ .default_attrs = amdgpu_xgmi_hive_attrs,
+};
+
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
@@ -241,7 +227,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
uint64_t fica_out;
unsigned int error_count = 0;
@@ -287,8 +273,8 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
/* Create sysfs link to hive info folder on the first device */
- if (adev != hive->adev) {
- ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+ if (hive->kobj.parent != (&adev->dev->kobj)) {
+ ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
"xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link to hive info");
@@ -296,9 +282,9 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
}
}
- sprintf(node, "node%d", hive->number_devices);
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
/* Create sysfs link from the hive folder to yourself */
- ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+ ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link from hive info");
goto remove_link;
@@ -308,7 +294,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
remove_link:
- sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+ sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
@@ -326,78 +312,96 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
- if (adev != hive->adev)
+ if (hive->kobj.parent != (&adev->dev->kobj))
sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
- sprintf(node, "node%d", hive->number_devices);
- sysfs_remove_link(hive->kobj, node);
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
+ sysfs_remove_link(&hive->kobj, node);
}
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
- int i;
- struct amdgpu_hive_info *tmp;
+ struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+ int ret;
if (!adev->gmc.xgmi.hive_id)
return NULL;
+ if (adev->hive) {
+ kobject_get(&adev->hive->kobj);
+ return adev->hive;
+ }
+
mutex_lock(&xgmi_mutex);
- for (i = 0 ; i < hive_count; ++i) {
- tmp = &xgmi_hives[i];
- if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
- if (lock)
- mutex_lock(&tmp->hive_lock);
- mutex_unlock(&xgmi_mutex);
- return tmp;
+ if (!list_empty(&xgmi_hive_list)) {
+ list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
+ if (hive->hive_id == adev->gmc.xgmi.hive_id)
+ goto pro_end;
}
}
- if (i >= AMDGPU_MAX_XGMI_HIVE) {
- mutex_unlock(&xgmi_mutex);
- return NULL;
+
+ hive = kzalloc(sizeof(*hive), GFP_KERNEL);
+ if (!hive) {
+ dev_err(adev->dev, "XGMI: allocation failed\n");
+ hive = NULL;
+ goto pro_end;
}
/* initialize a new hive if one does not exist yet */
- tmp = &xgmi_hives[hive_count++];
-
- if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
- mutex_unlock(&xgmi_mutex);
- return NULL;
+ ret = kobject_init_and_add(&hive->kobj,
+ &amdgpu_xgmi_hive_type,
+ &adev->dev->kobj,
+ "%s", "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kfree(hive);
+ hive = NULL;
+ goto pro_end;
}
- tmp->adev = adev;
- tmp->hive_id = adev->gmc.xgmi.hive_id;
- INIT_LIST_HEAD(&tmp->device_list);
- mutex_init(&tmp->hive_lock);
- mutex_init(&tmp->reset_lock);
- task_barrier_init(&tmp->tb);
-
- if (lock)
- mutex_lock(&tmp->hive_lock);
- tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
- tmp->hi_req_gpu = NULL;
+ hive->hive_id = adev->gmc.xgmi.hive_id;
+ INIT_LIST_HEAD(&hive->device_list);
+ INIT_LIST_HEAD(&hive->node);
+ mutex_init(&hive->hive_lock);
+ atomic_set(&hive->in_reset, 0);
+ atomic_set(&hive->number_devices, 0);
+ task_barrier_init(&hive->tb);
+ hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ hive->hi_req_gpu = NULL;
/*
* hive pstate on boot is high in vega20, so we have to go to low
* pstate after boot.
*/
- tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+ hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+ list_add_tail(&hive->node, &xgmi_hive_list);
+
+pro_end:
+ if (hive)
+ kobject_get(&hive->kobj);
mutex_unlock(&xgmi_mutex);
+ return hive;
+}
- return tmp;
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
+{
+ if (hive)
+ kobject_put(&hive->kobj);
}
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_device *request_adev = hive->hi_req_gpu ?
hive->hi_req_gpu : adev;
bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
+ amdgpu_put_xgmi_hive(hive);
/* fw bug so temporarily disable pstate switching */
return 0;
@@ -449,7 +453,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
- hive->number_devices,
+ atomic_read(&hive->number_devices),
&adev->psp.xgmi_context.top_info);
if (ret)
dev_err(adev->dev,
@@ -511,7 +515,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
}
- hive = amdgpu_get_xgmi_hive(adev, 1);
+ hive = amdgpu_get_xgmi_hive(adev);
if (!hive) {
ret = -EINVAL;
dev_err(adev->dev,
@@ -519,6 +523,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
goto exit;
}
+ mutex_lock(&hive->hive_lock);
top_info = &adev->psp.xgmi_context.top_info;
@@ -526,7 +531,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
list_for_each_entry(entry, &hive->device_list, head)
top_info->nodes[count++].node_id = entry->node_id;
top_info->num_nodes = count;
- hive->number_devices = count;
+ atomic_set(&hive->number_devices, count);
task_barrier_add_task(&hive->tb);
@@ -541,7 +546,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
if (ret)
- goto exit;
+ goto exit_unlock;
}
/* get latest topology info for each device from psp */
@@ -554,7 +559,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
/* To do : continue with some node failed or disable the whole hive */
- goto exit;
+ goto exit_unlock;
}
}
}
@@ -562,39 +567,51 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!ret)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
-
+exit_unlock:
mutex_unlock(&hive->hive_lock);
exit:
- if (!ret)
+ if (!ret) {
+ adev->hive = hive;
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
- else
+ } else {
+ amdgpu_put_xgmi_hive(hive);
dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
ret);
+ }
return ret;
}
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
- struct amdgpu_hive_info *hive;
+ struct amdgpu_hive_info *hive = adev->hive;
if (!adev->gmc.xgmi.supported)
return -EINVAL;
- hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
return -EINVAL;
+ mutex_lock(&hive->hive_lock);
task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ if (hive->hi_req_gpu == adev)
+ hive->hi_req_gpu = NULL;
+ list_del(&adev->gmc.xgmi.head);
mutex_unlock(&hive->hive_lock);
- if(!(--hive->number_devices)){
- amdgpu_xgmi_sysfs_destroy(adev, hive);
- mutex_destroy(&hive->hive_lock);
- mutex_destroy(&hive->reset_lock);
+ amdgpu_put_xgmi_hive(hive);
+ adev->hive = NULL;
+
+ if (atomic_dec_return(&hive->number_devices) == 0) {
+ /* Remove the hive from global hive list */
+ mutex_lock(&xgmi_mutex);
+ list_del(&hive->node);
+ mutex_unlock(&xgmi_mutex);
+
+ amdgpu_put_xgmi_hive(hive);
}
return psp_xgmi_terminate(&adev->psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6999eab16a72..148560d63554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -27,13 +27,13 @@
struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
- int number_devices;
- struct mutex hive_lock, reset_lock;
- struct kobject *kobj;
- struct device_attribute dev_attr;
- struct amdgpu_device *adev;
+ struct kobject kobj;
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct list_head node;
+ atomic_t number_devices;
+ struct mutex hive_lock;
+ atomic_t in_reset;
int hi_req_count;
struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
@@ -50,7 +50,8 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
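
Since the hive is now a reference-counted kobject rather than a static array slot, every lookup must be balanced with a put; the diff follows this pattern itself in amdgpu_xgmi_set_pstate() and amdgpu_xgmi_add_device():

struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

if (hive) {
	mutex_lock(&hive->hive_lock);	/* callers now lock explicitly */
	/* ... walk hive->device_list, update topology ... */
	mutex_unlock(&hive->hive_lock);
	amdgpu_put_xgmi_hive(hive);	/* drop the kobject reference */
}

The final put, once number_devices reaches zero and the hive has left the global list, lands in amdgpu_xgmi_hive_release(), which destroys the mutex and frees the hive.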
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
new file mode 100644
index 000000000000..5355827ed0ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018-2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGV_SRIOV_MSG__H_
+#define AMDGV_SRIOV_MSG__H_
+
+/* unit in kilobytes */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
+
+/*
+ * layout
+ * 0 64KB 65KB 66KB
+ * | VBIOS | PF2VF | VF2PF | Bad Page | ...
+ * | 64KB | 1KB | 1KB |
+ */
+#define AMD_SRIOV_MSG_SIZE_KB 1
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+
+/*
+ * PF2VF history log:
+ * v1 defined in amdgim
+ * v2 current
+ *
+ * VF2PF history log:
+ * v1 defined in amdgim
+ * v2 defined in amdgim
+ * v3 current
+ */
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3
+
+#define AMD_SRIOV_MSG_RESERVE_UCODE 24
+
+enum amd_sriov_ucode_engine_id {
+ AMD_SRIOV_UCODE_ID_VCE = 0,
+ AMD_SRIOV_UCODE_ID_UVD,
+ AMD_SRIOV_UCODE_ID_MC,
+ AMD_SRIOV_UCODE_ID_ME,
+ AMD_SRIOV_UCODE_ID_PFP,
+ AMD_SRIOV_UCODE_ID_CE,
+ AMD_SRIOV_UCODE_ID_RLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLG,
+ AMD_SRIOV_UCODE_ID_RLC_SRLS,
+ AMD_SRIOV_UCODE_ID_MEC,
+ AMD_SRIOV_UCODE_ID_MEC2,
+ AMD_SRIOV_UCODE_ID_SOS,
+ AMD_SRIOV_UCODE_ID_ASD,
+ AMD_SRIOV_UCODE_ID_TA_RAS,
+ AMD_SRIOV_UCODE_ID_TA_XGMI,
+ AMD_SRIOV_UCODE_ID_SMC,
+ AMD_SRIOV_UCODE_ID_SDMA,
+ AMD_SRIOV_UCODE_ID_SDMA2,
+ AMD_SRIOV_UCODE_ID_VCN,
+ AMD_SRIOV_UCODE_ID_DMCU,
+ AMD_SRIOV_UCODE_ID__MAX
+};
+
+#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed
+
+union amd_sriov_msg_feature_flags {
+ struct {
+ uint32_t error_log_collect : 1;
+ uint32_t host_load_ucodes : 1;
+ uint32_t host_flr_vramlost : 1;
+ uint32_t mm_bw_management : 1;
+ uint32_t pp_one_vf_mode : 1;
+ uint32_t reserved : 27;
+ } flags;
+ uint32_t all;
+};
+
+union amd_sriov_msg_os_info {
+ struct {
+ uint32_t windows : 1;
+ uint32_t reserved : 31;
+ } info;
+ uint32_t all;
+};
+
+struct amd_sriov_msg_pf2vf_info_header {
+ /* the total structure size in bytes */
+ uint32_t size;
+ /* version of this structure, written by the HOST */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_pf2vf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_pf2vf_info_header header;
+ /* use private key from mailbox 2 to create checksum */
+ uint32_t checksum;
+ /* feature flags supported by the HOST driver */
+ union amd_sriov_msg_feature_flags feature_flags;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_frame;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_frame;
+ /* MEC FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t mecfw_offset;
+ /* MEC FW size in BYTE */
+ uint32_t mecfw_size;
+ /* UVD FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t uvdfw_offset;
+ /* UVD FW size in BYTE */
+ uint32_t uvdfw_size;
+ /* VCE FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t vcefw_offset;
+ /* VCE FW size in BYTE */
+ uint32_t vcefw_size;
+ /* Bad pages block position in BYTE */
+ uint32_t bp_block_offset_low;
+ uint32_t bp_block_offset_high;
+ /* Bad pages block size in BYTE */
+ uint32_t bp_block_size;
+ /* interval in msec at which the VF updates the VF2PF area; 0 = manual */
+ uint32_t vf2pf_update_interval_ms;
+ /* identification in ROCm SMI */
+ uint64_t uuid;
+ uint32_t fcn_idx;
+ /* reserved */
+ uint32_t reserved[256-26];
+};
+
+struct amd_sriov_msg_vf2pf_info_header {
+ /* the total structure size in bytes */
+ uint32_t size;
+ /* version of this structure, written by the guest */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_vf2pf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version */
+ union amd_sriov_msg_os_info os_info;
+ /* guest fb information in the unit of MB */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest avc engine usage percentage. 0xffff means N/A */
+ uint32_t avc_enc_usage;
+ /* guest avc engine health percentage. 0xffff means N/A */
+ uint32_t avc_enc_health;
+ /* guest hevc engine usage percentage. 0xffff means N/A */
+ uint32_t hevc_enc_usage;
+ /* guest hevc engine health percentage. 0xffff means N/A */
+ uint32_t hevc_enc_health;
+ /* combined encode/decode usage */
+ uint32_t encode_usage;
+ uint32_t decode_usage;
+ /* Version of PF2VF that VF understands */
+ uint32_t pf2vf_version_required;
+ /* additional FB usage */
+ uint32_t fb_vis_usage;
+ uint32_t fb_vis_size;
+ uint32_t fb_size;
+ /* guest ucode data, each one is 1.25 Dword */
+ struct {
+ uint8_t id;
+ uint32_t version;
+ } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+
+ /* reserved */
+ uint32_t reserved[256-68];
+};
+
+/* mailbox message send from guest to host */
+enum amd_sriov_mailbox_request_message {
+ MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1,
+ MB_REQ_MSG_REL_GPU_INIT_ACCESS,
+ MB_REQ_MSG_REQ_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REL_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
+ MB_REQ_MSG_REQ_GPU_INIT_DATA,
+
+ MB_REQ_MSG_LOG_VF_ERROR = 200,
+};
+
+/* mailbox message send from host to guest */
+enum amd_sriov_mailbox_response_message {
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
+ MB_RES_MSG_SUCCESS,
+ MB_RES_MSG_FAIL,
+ MB_RES_MSG_QUERY_ALIVE,
+ MB_RES_MSG_GPU_INIT_DATA_READY,
+
+ MB_RES_MSG_TEXT_MESSAGE = 255
+};
+
+/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
+enum amd_sriov_gpu_init_data_version {
+ GPU_INIT_DATA_READY_V1 = 1,
+};
+
+#pragma pack(pop) // Restore previous packing option
+
+/* checksum function between host and guest */
+unsigned int amd_sriov_msg_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int checksum);
+
+/* assertion at compile time */
+#ifdef __linux__
+#define stringification(s) _stringification(s)
+#define _stringification(s) #s
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+ AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+
+_Static_assert(
+ AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+
+#undef _stringification
+#undef stringification
+#endif
+
+#endif /* AMDGV_SRIOV_MSG__H_ */
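
amd_sriov_msg_checksum() replaces the removed amdgpu_virt_fw_reserve_get_checksum(); the old implementation was a plain byte sum seeded with the mailbox key, minus the bytes of the checksum field itself. A sketch along those lines, hedged in that the shared header above only fixes the signature, not the algorithm:

unsigned int amd_sriov_msg_checksum(void *obj, unsigned long obj_size,
				    unsigned int key, unsigned int checksum)
{
	unsigned int ret = key;
	unsigned char *pos = (unsigned char *)obj;
	unsigned long i;

	/* sum every byte of the structure */
	for (i = 0; i < obj_size; i++)
		ret += pos[i];

	/* the checksum field itself must not contribute */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); i++)
		ret -= pos[i];

	return ret;
}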
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index 847ca9b3ce4e..3ea557864320 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -73,6 +73,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
athub_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 213e62a28ba0..159a2a4385a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -41,7 +41,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
SET_CRTC_OVERSCAN_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
@@ -84,7 +84,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
@@ -114,7 +114,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index =
GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -131,7 +131,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -147,7 +147,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
@@ -163,7 +163,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
@@ -192,7 +192,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
u16 misc = 0;
@@ -307,7 +307,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder = amdgpu_crtc->encoder;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -588,7 +588,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
struct amdgpu_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u8 frev, crev;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
@@ -749,7 +749,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
@@ -818,7 +818,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
u32 pll_clock = mode->clock;
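
The dev_private -> drm_to_adev() churn in this and the following files reflects drm_device being embedded in amdgpu_device rather than allocated separately; the accessors are presumably the usual container_of pair (the real definitions live in amdgpu.h):

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}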
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9b74cfdba7b8..a3ba9ca11e98 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -60,7 +60,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
u8 delay, u8 *ack)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
@@ -305,7 +305,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
amdgpu_connector->ddc_bus->rec.i2c_id, 0);
@@ -328,6 +328,22 @@ static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connect
buf[0], buf[1], buf[2]);
}
+static void amdgpu_atombios_dp_ds_ports(struct amdgpu_connector *amdgpu_connector)
+{
+ struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
+ int ret;
+
+ if (dig_connector->dpcd[DP_DPCD_REV] > 0x10) {
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux,
+ DP_DOWNSTREAM_PORT_0,
+ dig_connector->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS);
+ if (ret)
+ memset(dig_connector->downstream_ports, 0,
+ DP_MAX_DOWNSTREAM_PORTS);
+ }
+}
+
int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
@@ -343,7 +359,7 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
dig_connector->dpcd);
amdgpu_atombios_dp_probe_oui(amdgpu_connector);
-
+ amdgpu_atombios_dp_ds_ports(amdgpu_connector);
return 0;
}
@@ -702,7 +718,7 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
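
amdgpu_atombios_dp_ds_ports() caches the DPCD downstream-port capability bytes for DPCD 1.1+ sinks; a hedged example of how such a cache could be consumed, using only masks that exist in drm_dp_helper.h (the function name is made up):

#include <drm/drm_dp_helper.h>

static bool dp_ds_port_is_hdmi_sketch(struct amdgpu_connector_atom_dig *dig)
{
	/* the low bits of byte 0 encode the downstream port type */
	u8 type = dig->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	return type == DP_DS_PORT_TYPE_HDMI;
}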
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e94a9b652f7..8339c8c3a328 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -70,7 +70,7 @@ u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return 0;
@@ -84,7 +84,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
{
struct drm_encoder *encoder = &amdgpu_encoder->base;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder_atom_dig *dig;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
@@ -152,7 +152,7 @@ amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
}
@@ -166,7 +166,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
struct drm_connector *drm_connector)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd;
struct backlight_properties props;
struct amdgpu_backlight_privdata *pdata;
@@ -229,7 +229,7 @@ void
amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd = NULL;
struct amdgpu_encoder_atom_dig *dig;
@@ -319,7 +319,7 @@ static void
amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
@@ -382,7 +382,7 @@ static void
amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
@@ -573,7 +573,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -762,7 +762,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1178,7 +1178,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
@@ -1225,7 +1225,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
union external_encoder_control args;
@@ -1466,7 +1466,7 @@ void
amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
union crtc_source_param args;
@@ -1673,7 +1673,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1701,7 +1701,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -1751,7 +1751,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
uint32_t bios_0_scratch;
@@ -1790,7 +1790,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
@@ -1848,7 +1848,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
bool connected)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector =
to_amdgpu_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -1999,7 +1999,7 @@ struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
uint16_t data_offset, misc;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index b4cc7c55fa16..09a538465ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index c2c67ab68a43..5442df094102 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1336,11 +1336,13 @@ cik_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- case CHIP_HAWAII:
/* disable baco reset until it works */
/* smu7_asic_get_baco_capability(adev, &baco_reset); */
baco_reset = false;
break;
+ case CHIP_HAWAII:
+ baco_reset = cik_asic_supports_baco(adev);
+ break;
default:
baco_reset = false;
break;
@@ -1366,8 +1368,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
+ dev_info(adev->dev, "PCI CONFIG reset\n");
r = cik_asic_pci_config_reset(adev);
}
@@ -1919,6 +1923,10 @@ static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
return (nak_r + nak_g);
}
+static void cik_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1939,6 +1947,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.need_reset_on_init = &cik_need_reset_on_init,
.get_pcie_replay_count = &cik_get_pcie_replay_count,
.supports_baco = &cik_asic_supports_baco,
+ .pre_asic_init = &cik_pre_asic_init,
};
static int cik_common_early_init(void *handle)
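Beyond enabling BACO reset on Hawaii via cik_asic_supports_baco() and adding dev_info() breadcrumbs for the chosen reset path, cik.c now implements a pre_asic_init hook, which is a no-op on CIK. Entries in amdgpu_asic_funcs are reached through the per-ASIC function table; a hedged sketch of a plausible call site, with the wrapper macro name assumed here for illustration:

/* Hypothetical wrapper; the real one is expected in amdgpu.h. */
#define amdgpu_asic_pre_asic_init(adev) \
	((adev)->asic_funcs->pre_asic_init((adev)))

static void example_reset_prologue(struct amdgpu_device *adev)
{
	/*
	 * Give the ASIC a chance to do chip-specific work (often
	 * nothing, as on CIK) before IP blocks are re-initialized
	 * after a reset.
	 */
	amdgpu_asic_pre_asic_init(adev);
}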
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 401c99f0b2d0..db953e95f3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -316,14 +316,9 @@ static int cik_ih_sw_fini(void *handle)
static int cik_ih_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = cik_ih_irq_init(adev);
- if (r)
- return r;
-
- return 0;
+ return cik_ih_irq_init(adev);
}
static int cik_ih_hw_fini(void *handle)
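The hw_init change is a pure simplification: when a function exists only to forward a helper's status code, the staging local and explicit return 0 are dead weight. A generic before/after sketch with stand-in names, not driver code:

static int helper_init(void)	/* stand-in for cik_ih_irq_init() */
{
	return 0;
}

static int init_verbose(void)
{
	int r = helper_init();

	if (r)
		return r;
	return 0;
}

static int init_direct(void)
{
	return helper_init();	/* behaviorally identical */
}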
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 20f108818b2b..a3c3fe96515f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
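The soft-reset rework stops consulting the SRBM_STATUS2 busy bits and instead always halts both SDMA instances' F32 engines and queues both soft-reset masks, so an engine that is wedged without reporting busy still gets reset. The per-instance step reduces to a small pattern; an illustrative helper (not the driver function itself), reusing only the register names visible in the hunk above:

/*
 * Illustrative only: halt one SDMA instance's F32 microcode engine
 * before its SRBM soft-reset bit is asserted.
 */
static u32 example_halt_sdma_instance(struct amdgpu_device *adev,
				      u32 instance_offset, u32 reset_mask)
{
	u32 f32_cntl;

	f32_cntl = RREG32(mmSDMA0_F32_CNTL + instance_offset);
	f32_cntl |= SDMA0_F32_CNTL__HALT_MASK;	/* stop instruction fetch */
	WREG32(mmSDMA0_F32_CNTL + instance_offset, f32_cntl);

	return reset_mask;	/* caller ORs this into srbm_soft_reset */
}

With both masks now accumulated unconditionally, the trailing if (srbm_soft_reset) check above always takes the reset branch.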
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 84b45a019a36..5963cbe0d455 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -328,7 +328,7 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -383,7 +383,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -504,7 +504,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev)
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1209,7 +1209,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad
static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1226,7 +1226,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1272,7 +1272,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1328,7 +1328,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1483,7 +1483,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1519,7 +1519,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint8_t *frame = buffer + 3;
@@ -1538,7 +1538,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1569,7 +1569,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1749,7 +1749,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1822,7 +1822,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1836,7 +1836,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1850,7 +1850,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2095,7 +2095,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
u32 tmp;
@@ -2111,7 +2111,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2250,7 +2250,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2285,7 +2285,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2300,7 +2300,7 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2311,7 +2311,7 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2329,7 +2329,7 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2503,7 +2503,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2557,7 +2557,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2701,7 +2701,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2709,8 +2709,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = 128;
amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
switch (amdgpu_crtc->crtc_id) {
case 0:
@@ -2792,24 +2792,24 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2819,7 +2819,7 @@ static int dce_v10_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2832,7 +2832,7 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2844,13 +2844,13 @@ static int dce_v10_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v10_0_audio_fini(adev);
dce_v10_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3157,14 +3157,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3176,7 +3176,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3245,7 +3245,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3345,7 +3345,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3385,7 +3385,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3485,7 +3485,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 01ce52266966..1954472c8e8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -530,7 +530,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev)
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1235,7 +1235,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad
static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1252,7 +1252,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1298,7 +1298,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1354,7 +1354,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1525,7 +1525,7 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1561,7 +1561,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint8_t *frame = buffer + 3;
@@ -1580,7 +1580,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1611,7 +1611,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1791,7 +1791,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1864,7 +1864,7 @@ static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1878,7 +1878,7 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1892,7 +1892,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2137,7 +2137,7 @@ static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
u32 tmp;
@@ -2153,7 +2153,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2283,7 +2283,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2364,7 +2364,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2379,7 +2379,7 @@ static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2390,7 +2390,7 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2408,7 +2408,7 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2582,7 +2582,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2636,7 +2636,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2706,7 +2706,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
@@ -2809,7 +2809,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2817,8 +2817,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = 128;
amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
switch (amdgpu_crtc->crtc_id) {
case 0:
@@ -2913,24 +2913,24 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
@@ -2941,7 +2941,7 @@ static int dce_v11_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2954,7 +2954,7 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2966,13 +2966,13 @@ static int dce_v11_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v11_0_audio_fini(adev);
dce_v11_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3283,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
if(amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3302,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
if(works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3372,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3471,7 +3471,7 @@ dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3511,7 +3511,7 @@ static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3611,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index cbddead3dafb..3a44753a80d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -279,7 +279,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -324,7 +324,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -401,7 +401,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1114,7 +1114,7 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1130,7 +1130,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1174,7 +1174,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1235,7 +1235,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1392,7 +1392,7 @@ static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1408,7 +1408,7 @@ static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
uint32_t clock, int bpc)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1446,7 +1446,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1488,7 +1488,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
u32 tmp;
@@ -1522,7 +1522,7 @@ static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1566,7 +1566,7 @@ static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1579,7 +1579,7 @@ static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1616,7 +1616,7 @@ static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1645,7 +1645,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1714,7 +1714,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1788,7 +1788,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1799,7 +1799,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
@@ -1810,7 +1810,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2033,7 +2033,7 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2048,7 +2048,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
@@ -2148,7 +2148,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2177,7 +2177,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2192,7 +2192,7 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2204,7 +2204,7 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2222,7 +2222,7 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
int w = amdgpu_crtc->cursor_width;
@@ -2397,7 +2397,7 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2447,7 +2447,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2591,7 +2591,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2599,8 +2599,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2669,20 +2669,20 @@ static int dce_v6_0_sw_init(void *handle)
adev->mode_info.mode_config_initialized = true;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)
ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
if (ret)
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2706,7 +2706,7 @@ static int dce_v6_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
}
@@ -2717,12 +2717,12 @@ static int dce_v6_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v6_0_audio_fini(adev);
dce_v6_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -2967,7 +2967,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
@@ -3036,14 +3036,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3055,7 +3055,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3146,7 +3146,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3187,7 +3187,7 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3297,7 +3297,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index fa0ad50b628c..3603e5f13077 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -273,7 +273,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -318,7 +318,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -444,7 +444,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev)
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1146,7 +1146,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade
static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
@@ -1164,7 +1164,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1225,7 +1225,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1278,7 +1278,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
@@ -1446,7 +1446,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1469,7 +1469,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
@@ -1489,7 +1489,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1516,7 +1516,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1678,7 +1678,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1751,7 +1751,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1765,7 +1765,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1779,7 +1779,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2004,7 +2004,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2018,7 +2018,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
@@ -2140,7 +2140,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2188,7 +2188,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2203,7 +2203,7 @@ static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2213,7 +2213,7 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2230,7 +2230,7 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2404,7 +2404,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2458,7 +2458,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2609,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2617,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2689,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2716,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2729,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2741,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v8_0_audio_fini(adev);
dce_v8_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3057,7 +3057,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
@@ -3126,14 +3126,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3145,7 +3145,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3233,7 +3233,7 @@ dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3273,7 +3273,7 @@ static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3373,7 +3373,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
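Note on the dce_v8_0.c hunks above: this is a mechanical conversion from the raw dev_private/ddev pointers to the drm_to_adev()/adev_to_drm() accessors. A minimal sketch of the helpers, assuming they simply wrap the legacy pointers at this point in the series (the real definitions live in amdgpu.h and switch to container_of() once drm_device is embedded in amdgpu_device):

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return ddev->dev_private;	/* sketch: the back-pointer the hunks stop open-coding */
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return adev->ddev;		/* sketch: the forward pointer */
}

Routing every access through the accessors lets the pointer representation change later without touching each IP block again.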
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index d5ff7b6331ff..b4d4b76538d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -47,6 +47,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
int index);
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id);
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
int crtc,
enum amdgpu_interrupt_state state);
@@ -132,7 +135,7 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -171,8 +174,10 @@ static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
- drm_crtc_vblank_off(crtc);
+ if (dev->num_crtcs)
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -235,7 +240,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -247,6 +252,11 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+ hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
+ amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
+ hrtimer_start(&amdgpu_crtc->vblank_timer,
+ DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
return 0;
}
@@ -374,24 +384,24 @@ static int dce_virtual_sw_init(void *handle)
if (r)
return r;
- adev->ddev->max_vblank_count = 0;
+ adev_to_drm(adev)->max_vblank_count = 0;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -403,7 +413,7 @@ static int dce_virtual_sw_init(void *handle)
return r;
}
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -415,9 +425,9 @@ static int dce_virtual_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
/* clear crtcs pointer to avoid dce irq finish routine access freed data */
memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
adev->mode_info.mode_config_initialized = false;
@@ -476,7 +486,7 @@ static int dce_virtual_hw_fini(void *handle)
for (i = 0; i<adev->mode_info.num_crtc; i++)
if (adev->mode_info.crtcs[i])
- dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+ hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
return 0;
}
@@ -602,7 +612,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
if (!encoder)
return -ENOMEM;
encoder->possible_crtcs = 1 << index;
- drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
@@ -613,7 +623,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
}
/* add a new connector */
- drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -663,14 +673,14 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -682,7 +692,7 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
amdgpu_bo_unref(&works->old_abo);
@@ -697,10 +707,16 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
struct amdgpu_crtc, vblank_timer);
struct drm_device *ddev = amdgpu_crtc->base.dev;
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
+ [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
- drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
- dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ }
hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
HRTIMER_MODE_REL);
@@ -716,21 +732,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
return;
}
- if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
- DCE_VIRTUAL_VBLANK_PERIOD);
- adev->mode_info.crtcs[crtc]->vblank_timer.function =
- dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
- DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
- }
-
adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}
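Net effect of the dce_virtual.c changes: the software vblank hrtimer now lives for the whole life of the CRTC instead of being created and torn down from the interrupt-state callback, and drm_crtc_vblank_off() is skipped when dev->num_crtcs is zero (i.e. vblank support was never initialized, where calling it would trip a WARN). Condensed flow, reconstructed from the hunks above:

/* dce_virtual_crtc_init():      hrtimer_init() + hrtimer_start()  -- timer always runs
 * ..._vblank_interrupt_state(): only records state in vsync_timer_enabled
 * ..._vblank_timer_handle():    if (amdgpu_irq_enabled(adev, source, irq_type))
 *                                   drm_handle_vblank() + dce_virtual_pageflip();
 *                               re-arm the timer unconditionally
 * dce_virtual_hw_fini():        hrtimer_cancel() per CRTC
 */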
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 1ab261836983..7b89fd2aa44a 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -251,7 +251,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
int i, count;
ddev = dev_get_drvdata(dev);
- adev = ddev->dev_private;
+ adev = drm_to_adev(ddev);
count = 0;
for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
@@ -455,7 +455,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr,
uint32_t *lo_val,
- uint32_t *hi_val)
+ uint32_t *hi_val,
+ bool is_enable)
{
uint32_t eventsel, instance, unitmask;
@@ -477,7 +478,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
instance_5432 = (instance >> 2) & 0xf;
instance_76 = (instance >> 6) & 0x3;
- *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
+ *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
+ *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
*hi_val = (instance_76 << 29) | instance_5432;
DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
@@ -572,14 +574,14 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
}
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
- int is_enable)
+ int is_add)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
- if (is_enable)
+ if (is_add)
return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config);
@@ -589,7 +591,8 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
- &hi_val);
+ &hi_val,
+ true);
if (ret)
return ret;
@@ -612,7 +615,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
}
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
- int is_disable)
+ int is_remove)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int ret = 0;
@@ -624,15 +627,17 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
- &hi_val);
+ &hi_val,
+ false);
if (ret)
return ret;
- df_v3_6_reset_perfmon_cntr(adev, config);
- if (is_disable)
+ if (is_remove) {
+ df_v3_6_reset_perfmon_cntr(adev, config);
df_v3_6_pmc_release_cntr(adev, config);
+ }
break;
default:
@@ -646,7 +651,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+ uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
*count = 0;
switch (adev->asic_type) {
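The df_v3_6 rework separates programming a counter from enabling it: bit 22 of the low control word is the enable bit, now set or cleared from the new is_enable argument instead of being OR'd in unconditionally, and pmc_stop() only resets/releases the counter when is_remove is set, so a stopped counter can be restarted without reallocation. A hedged restatement of the encoding (DF_PERFMON_EN is a hypothetical name for the bare (1 << 22) in the hunk):

#define DF_PERFMON_EN	(1u << 22)	/* hypothetical name for the enable bit */

static u32 df_build_lo_val(u32 unitmask, u32 instance_10, u32 eventsel,
			   bool is_enable)
{
	u32 lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;

	return is_enable ? (lo_val | DF_PERFMON_EN) : (lo_val & ~DF_PERFMON_EN);
}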
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 65997ffaed45..3579565e0eab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -112,6 +112,25 @@
#define mmCP_HYP_ME_UCODE_DATA 0x5817
#define mmCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+//CC_GC_SA_UNIT_DISABLE
+#define mmCC_GC_SA_UNIT_DISABLE 0x0fe9
+#define mmCC_GC_SA_UNIT_DISABLE_BASE_IDX 0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L
+//GC_USER_SA_UNIT_DISABLE
+#define mmGC_USER_SA_UNIT_DISABLE 0x0fea
+#define mmGC_USER_SA_UNIT_DISABLE_BASE_IDX 0
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L
+//PA_SC_ENHANCE_3
+#define mmPA_SC_ENHANCE_3 0x1085
+#define mmPA_SC_ENHANCE_3_BASE_IDX 0
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+
+#define mmCGTT_SPI_CS_CLK_CTRL 0x507c
+#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX 1
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3078,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
static const struct soc15_reg_golden golden_settings_gc_10_3[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
@@ -3091,6 +3111,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
@@ -3188,6 +3209,8 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -3307,6 +3330,29 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}
+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+ break;
+ case CHIP_NAVI14:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+ break;
+ case CHIP_NAVI12:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -3317,9 +3363,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_0_nv10,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
@@ -3328,9 +3371,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_nv14,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
@@ -3339,9 +3379,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_2_nv12,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
case CHIP_SIENNA_CICHLID:
soc15_program_register_sequence(adev,
@@ -3360,6 +3397,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+ gfx_v10_0_init_spm_golden_registers(adev);
}
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
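The SPM golden registers are split out of gfx_v10_0_init_golden_registers() so they can be reprogrammed on their own; the function is also exported through the new .init_spm_golden member added to gfx_v10_0_gfx_funcs later in this file. An assumed call site using only that hook:

/* assumed caller: refresh just the RLC SPM golden settings */
if (adev->gfx.funcs && adev->gfx.funcs->init_spm_golden)
	adev->gfx.funcs->init_spm_golden(adev);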
@@ -3545,7 +3583,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
break;
}
- if (adev->gfx.cp_fw_write_wait == false)
+ if (!adev->gfx.cp_fw_write_wait)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
@@ -3571,6 +3609,17 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
+static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+}
+
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
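gfx_v10_0_init_rlc_iram_dram_microcode() consumes four fields of a v2.2 RLC firmware header that this diff never shows. The sketch below reconstructs only the fields the function reads and is an assumption about the real struct (presumably in amdgpu_ucode.h), not its full definition:

struct rlc_firmware_header_v2_2 {
	/* ... v2.0/v2.1 fields precede these ... */
	__le32 rlc_iram_ucode_size_bytes;
	__le32 rlc_iram_ucode_offset_bytes;	/* from start of the fw blob */
	__le32 rlc_dram_ucode_size_bytes;
	__le32 rlc_dram_ucode_offset_bytes;
};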
@@ -3595,6 +3644,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
+ case CHIP_NAVY_FLOUNDER:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ break;
default:
break;
}
@@ -3683,8 +3735,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -3726,8 +3776,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (version_major == 2) {
+ if (version_minor >= 1)
+ gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (version_minor == 2)
+ gfx_v10_0_init_rlc_iram_dram_microcode(adev);
+ }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -3788,8 +3842,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
@@ -3809,6 +3862,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
@@ -4022,21 +4090,23 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v10_0_mec_fini(adev);
- return r;
- }
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v10_0_mec_fini(adev);
+ return r;
+ }
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
@@ -4147,6 +4217,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
+ .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -4515,12 +4586,17 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
int i, j;
u32 data;
u32 active_rbs = 0;
+ u32 bitmap;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+ continue;
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v10_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
@@ -6180,7 +6256,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6192,7 +6268,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) {
+ } else if (amdgpu_in_reset(adev)) {
/* reset mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
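This and the remaining MQD-init hunks swap direct reads of adev->in_gpu_reset for amdgpu_in_reset(adev). A plausible shape for the helper, assuming the flag moved into an atomic so reset state can be updated safely from several paths:

bool amdgpu_in_reset(struct amdgpu_device *adev)
{
	/* assumed body: flag kept as an atomic behind the accessor */
	return atomic_read(&adev->in_gpu_reset) != 0;
}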
@@ -6433,6 +6509,10 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int j;
+ /* inactivate the queue */
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -6541,7 +6621,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v10_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6577,7 +6657,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6587,7 +6667,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6925,6 +7005,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ gfx_v10_3_program_pbb_mode(adev);
+
return r;
}
@@ -6958,15 +7041,19 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ if (!adev->in_pci_err_recovery) {
#ifndef BRING_UP_DEBUG
- if (amdgpu_async_gfx_ring) {
- r = gfx_v10_0_kiq_disable_kgq(adev);
- if (r)
- DRM_ERROR("KGQ disable failed\n");
- }
+ if (amdgpu_async_gfx_ring) {
+ r = gfx_v10_0_kiq_disable_kgq(adev);
+ if (r)
+ DRM_ERROR("KGQ disable failed\n");
+ }
#endif
- if (amdgpu_gfx_disable_kcq(adev))
- DRM_ERROR("KCQ disable failed\n");
+ if (amdgpu_gfx_disable_kcq(adev))
+ DRM_ERROR("KCQ disable failed\n");
+ }
+
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
@@ -7033,8 +7120,7 @@ static int gfx_v10_0_soft_reset(void *handle)
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
- GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
- | GRBM_STATUS__BCI_BUSY_MASK)) {
+ GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP,
1);
@@ -7159,7 +7245,7 @@ static int gfx_v10_0_early_init(void *handle)
break;
}
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v10_0_set_kiq_pm4_funcs(adev);
gfx_v10_0_set_ring_funcs(adev);
@@ -7263,10 +7349,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
-
- /* only for Vega10 & Raven1 */
- data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -7429,7 +7513,6 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
(AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
- AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
@@ -8738,6 +8821,10 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+ continue;
mask = 1;
ao_bitmap = 0;
counter = 0;
@@ -8772,6 +8859,47 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
return 0;
}
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev)
+{
+ uint32_t efuse_setting, vbios_setting, disabled_sa, max_sa_mask;
+
+ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
+ efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+ efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+ vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
+ vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+ vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+ max_sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines);
+ disabled_sa = efuse_setting | vbios_setting;
+ disabled_sa &= max_sa_mask;
+
+ return disabled_sa;
+}
+
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
+{
+ uint32_t max_sa_per_se, max_sa_per_se_mask, max_shader_engines;
+ uint32_t disabled_sa_mask, se_index, disabled_sa_per_se;
+
+ disabled_sa_mask = gfx_v10_3_get_disabled_sa(adev);
+
+ max_sa_per_se = adev->gfx.config.max_sh_per_se;
+ max_sa_per_se_mask = (1 << max_sa_per_se) - 1;
+ max_shader_engines = adev->gfx.config.max_shader_engines;
+
+ for (se_index = 0; max_shader_engines > se_index; se_index++) {
+ disabled_sa_per_se = disabled_sa_mask >> (se_index * max_sa_per_se);
+ disabled_sa_per_se &= max_sa_per_se_mask;
+ if (disabled_sa_per_se == max_sa_per_se_mask) {
+ WREG32_FIELD15(GC, 0, PA_SC_ENHANCE_3, FORCE_PBB_WORKLOAD_MODE_TO_ZERO, 1);
+ break;
+ }
+ }
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
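How the two new gfx_v10_3 helpers fit together: gfx_v10_3_get_disabled_sa() folds the fuse and VBIOS harvest bits into one bitmap, clamped to the number of shader arrays the config allows, and gfx_v10_3_program_pbb_mode() forces PBB workload mode to zero as soon as one SE has every SA harvested. A minimal model of the mask derivation (amdgpu_gfx_create_bitmask(n) is just ((1 << n) - 1)):

static u32 disabled_sa(u32 efuse_bits, u32 vbios_bits, u32 num_sa_total)
{
	u32 max_mask = (1u << num_sa_total) - 1;

	return (efuse_bits | vbios_bits) & max_mask;	/* either source can harvest */
}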
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 33f1c4a46ebe..94b7e0531d09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1343,21 +1343,22 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ return r;
+ }
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
-
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
return 0;
}
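Same guard as in the gfx_v10_0 hunk above and the gfx_v9_0 one below: once the ring count comes from amdgpu_num_kcq, a device can legitimately run with zero compute rings, making mec_hpd_size zero. The shared pattern:

mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
if (mec_hpd_size) {
	/* create/reserve BO, memset(hpd, 0, mec_hpd_size), kunmap, unreserve */
}

Skipping the allocation leaves hpd_eop_obj NULL, which the fini paths already tolerate, instead of creating a zero-sized BO.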
@@ -3250,7 +3251,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
- /* fall through */
+ fallthrough;
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
@@ -4632,7 +4633,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4669,7 +4670,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4681,7 +4682,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -5294,7 +5295,7 @@ static int gfx_v8_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
@@ -5342,10 +5343,9 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if (((adev->asic_type == CHIP_POLARIS11) ||
+ if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
+ (adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
@@ -5879,8 +5879,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5901,8 +5900,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -5931,8 +5929,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5951,8 +5948,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5973,8 +5969,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -5989,8 +5984,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6004,8 +5998,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cb9d60a4e05e..0d8e203b10ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -49,6 +49,7 @@
#include "amdgpu_ras.h"
#include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -116,6 +117,13 @@ MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -691,6 +699,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
@@ -787,7 +796,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
@@ -1629,7 +1637,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
@@ -1938,22 +1949,23 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v9_0_mec_fini(adev);
+ return r;
+ }
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
-
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
@@ -2073,6 +2085,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.ras_error_inject = &gfx_v9_4_ras_error_inject,
.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2194,7 +2207,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
- int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
unsigned int hw_prio;
@@ -2219,13 +2231,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
- if (r)
- return r;
-
-
- return 0;
+ return amdgpu_ring_init(adev, ring, 1024,
+ &adev->gfx.eop_irq, irq_type, hw_prio);
}
static int gfx_v9_0_sw_init(void *handle)
@@ -2400,7 +2407,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
/* TODO */
}
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+ u32 instance)
{
u32 data;
@@ -2558,14 +2566,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
@@ -2798,7 +2806,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
- if (enable == true) {
+ if (enable) {
/* enable GFXIP control over CGPG */
data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
if(default_data != data)
@@ -3684,7 +3692,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3722,7 +3730,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3734,7 +3742,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3928,7 +3936,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ is hanging when binding back
*/
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
@@ -4086,7 +4094,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read;
might_sleep();
@@ -4625,7 +4633,7 @@ static int gfx_v9_0_early_init(void *handle)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
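All three gfx generations now take num_compute_rings from the amdgpu_num_kcq module parameter instead of the fixed AMDGPU_MAX_COMPUTE_RINGS. For the bare assignment here to be safe, the parameter must be validated once at device init; a sketch of the assumed clamp (not visible in this diff):

/* assumed sanitization during device init */
if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
	amdgpu_num_kcq = 8;
	dev_warn(adev->dev,
		 "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
}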
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index fa5a3fbaf6ab..dfe8d4841f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -26,9 +26,7 @@
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
-uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+ u32 instance);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
old mode 100755
new mode 100644
index 46351db36922..bc699d680ce8
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -57,10 +57,10 @@ static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
/* SPI */
{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
/* SQ */
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 },
/* SQC */
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
@@ -992,3 +992,32 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
return ret;
}
+
+static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+ j++) {
+ gfx_v9_4_select_se_sh(adev, i, 0, j);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_rdrsp_status_regs));
+ if (reg_value)
+ dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+ j, reg_value);
+ }
+ }
+
+ gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
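gfx_v9_4_query_ras_error_status() walks the GCEA instances (the loop bounds come from the se_num/instance fields of the soc15_reg_entry, i.e. 1 x 32 here) and warns on any non-zero GCEA_ERR_STATUS. An assumed call site through the hook wired into gfx_v9_4_gfx_funcs earlier in this patch:

/* assumed caller, e.g. from the RAS status query path */
if (adev->gfx.funcs->query_ras_error_status)
	adev->gfx.funcs->query_ras_error_status(adev);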
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 1ffecc5c0f0a..875f18473a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -34,4 +34,6 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 529e46386a50..fad887a66886 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -245,7 +245,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
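As in the gfx_v9_0_constants_init() hunk earlier, the raw amdgpu_noretry module parameter gives way to a cached per-device adev->gmc.noretry. A plausible initializer, assuming a helper that honors an explicit user setting and otherwise applies a per-ASIC default (the default policy is not visible in this diff):

void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)	/* assumed helper */
{
	if (amdgpu_noretry == -1)		/* "auto" */
		adev->gmc.noretry = 0;		/* assumed default */
	else
		adev->gmc.noretry = amdgpu_noretry;
}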
@@ -403,3 +403,13 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
+
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
+ .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v1_0_gart_enable,
+ .gart_disable = gfxhub_v1_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+ .init = gfxhub_v1_0_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 92d3a70cd9b1..0c46672bbf49 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -33,4 +33,5 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index c0ab71df0d90..1e24b6d51e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -21,6 +21,7 @@
*
*/
#include "amdgpu.h"
+#include "gfxhub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gc/gc_9_2_1_offset.h"
@@ -28,7 +29,7 @@
#include "soc15_common.h"
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
{
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
u32 max_region =
@@ -66,3 +67,13 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs = {
+ .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v1_0_gart_enable,
+ .gart_disable = gfxhub_v1_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+ .init = gfxhub_v1_0_init,
+ .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
index d753cf28a0a6..ae5759ffbee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
@@ -24,6 +24,6 @@
#ifndef __GFXHUB_V1_1_H__
#define __GFXHUB_V1_1_H__
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 394e6f56948a..456360bf58fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -31,7 +31,78 @@
#include "soc15_common.h"
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -41,12 +112,12 @@ u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -82,11 +153,6 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
/* Disable AGP. */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
@@ -247,7 +313,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -276,7 +342,7 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
}
}
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
@@ -292,7 +358,7 @@ int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
u32 tmp;
@@ -323,7 +389,7 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
@@ -360,7 +426,12 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void gfxhub_v2_0_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
+};
+
+static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -390,4 +461,24 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
mmGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
+ .get_fb_location = gfxhub_v2_0_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v2_0_gart_enable,
+ .gart_disable = gfxhub_v2_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
+ .init = gfxhub_v2_0_init,
+};
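
gfxhub_v2_0_get_invalidate_req() packs the per-VMID invalidation request with REG_SET_FIELD, which clears a named field's mask and ORs in the shifted value. A self-contained sketch of that arithmetic; the mask/shift values are invented for illustration, not the real GCVM_INVALIDATE_ENG0_REQ layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented field layout, for illustration only. */
    #define PER_VMID_REQ_MASK  0x0000FFFFu
    #define PER_VMID_REQ_SHIFT 0
    #define FLUSH_TYPE_MASK    0x00070000u
    #define FLUSH_TYPE_SHIFT   16

    /* Same shape as the kernel's REG_SET_FIELD: clear the field, then
     * OR in the shifted value, truncated to the field's mask. */
    static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                              uint32_t val)
    {
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        unsigned int vmid = 3, flush_type = 0;
        uint32_t req = 0;

        req = set_field(req, PER_VMID_REQ_MASK, PER_VMID_REQ_SHIFT, 1u << vmid);
        req = set_field(req, FLUSH_TYPE_MASK, FLUSH_TYPE_SHIFT, flush_type);
        printf("req = 0x%08x\n", req); /* 0x00000008 for vmid 3 */
        return 0;
    }

The same encoder is duplicated per hub revision because each hub owns its request register; gmc_v10_0 below drops its private copy and fetches it through hub->vmhub_funcs->get_invalidate_req instead.
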
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 392b8cd94fc0..9ddc35cd53d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -24,14 +24,6 @@
#ifndef __GFXHUB_V2_0_H__
#define __GFXHUB_V2_0_H__
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void gfxhub_v2_0_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index fa0bca3e1f73..724bb29e9bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -31,7 +31,78 @@
#include "soc15_common.h"
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -41,12 +112,12 @@ u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
return base;
}
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -135,6 +206,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +267,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -236,7 +319,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -265,7 +348,7 @@ static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
}
}
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
/*
@@ -293,7 +376,7 @@ int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
u32 tmp;
@@ -322,10 +405,17 @@ void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -359,7 +449,12 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void gfxhub_v2_1_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
+};
+
+static void gfxhub_v2_1_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -389,9 +484,19 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
mmGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
}
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
{
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
u32 max_region =
@@ -426,3 +531,14 @@ int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
+ .get_fb_location = gfxhub_v2_1_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v2_1_gart_enable,
+ .gart_disable = gfxhub_v2_1_gart_disable,
+ .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
+ .init = gfxhub_v2_1_init,
+ .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
+};
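
Relative to v2.0, gfxhub_v2_1 also grows early returns in init_cache_regs(), disable_identity_aperture() and set_fault_enable_default(): those registers are not writable from an SR-IOV VF, so the guest skips them and leaves the programming to the PF. The guard pattern in plain C, with is_sriov_vf() standing in for amdgpu_sriov_vf(adev):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for amdgpu_sriov_vf(adev): true when running as a guest VF. */
    static bool is_sriov_vf(void) { return true; }

    static void init_cache_regs(void)
    {
        /* PF-owned registers: bail out once at the top instead of
         * guarding every individual write. */
        if (is_sriov_vf())
            return;

        printf("programming L2 cache registers (bare-metal/PF path)\n");
    }

    int main(void)
    {
        init_cache_regs();
        return 0;
    }
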
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
index 3452a4e9a3da..f75c2eccfad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
@@ -24,16 +24,6 @@
#ifndef __GFXHUB_V2_1_H__
#define __GFXHUB_V2_1_H__
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void gfxhub_v2_1_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ec90c62078d9..dbc8b76b9b78 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -25,11 +25,10 @@
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
+#include "umc_v8_7.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -57,68 +56,31 @@ static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
};
#endif
+static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type,
enum amdgpu_interrupt_state state)
{
- struct amdgpu_vmhub *hub;
- u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
-
- bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
- bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_MMHUB_0];
- WREG32(reg, tmp);
- }
-
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_GFXHUB_0];
- WREG32(reg, tmp);
- }
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp |= bits[AMDGPU_MMHUB_0];
- WREG32(reg, tmp);
- }
-
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp |= bits[AMDGPU_GFXHUB_0];
- WREG32(reg, tmp);
- }
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -166,29 +128,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
task_info.task_name, task_info.pid);
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev)) {
- dev_err(adev->dev,
- "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
- status);
- dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, CID));
- dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
- dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
- dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
- dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
- dev_err(adev->dev, "\t RW: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, RW));
- }
+ if (!amdgpu_sriov_vf(adev))
+ hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
return 0;
@@ -199,30 +140,20 @@ static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
.process = gmc_v10_0_process_interrupt,
};
-static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
+static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
+ .set = gmc_v10_0_ecc_interrupt_state,
+ .process = amdgpu_umc_process_ecc_irq,
+};
+
+static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
-}
-static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
- uint32_t flush_type)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vmid*/
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
+ }
}
/**
@@ -265,7 +196,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 tmp;
/* Use register 17 for GART */
const unsigned eng = 17;
@@ -356,16 +287,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- !adev->in_gpu_reset) {
-
+ down_read_trylock(&adev->reset_sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
- u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
+
+ up_read(&adev->reset_sem);
return;
}
@@ -381,7 +313,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
- adev->in_gpu_reset ||
+ amdgpu_in_reset(adev) ||
ring->sched.ready == false) {
gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -459,7 +391,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
}
@@ -491,7 +423,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
+ uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
/*
@@ -641,6 +573,28 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
}
}
+static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+ u32 pitch;
+
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+ 4);
+ }
+
+ return size;
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
@@ -648,7 +602,8 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
- .get_vm_pte = gmc_v10_0_get_vm_pte
+ .get_vm_pte = gmc_v10_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -657,12 +612,51 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}
+static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
+ adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+ adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
+ adev->umc.funcs = &umc_v8_7_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
+
+static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+ adev->mmhub.funcs = &mmhub_v2_0_funcs;
+}
+
+static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
+ break;
+ default:
+ adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
+ break;
+ }
+}
+
+
static int gmc_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v10_0_set_mmhub_funcs(adev);
+ gmc_v10_0_set_gfxhub_funcs(adev);
gmc_v10_0_set_gmc_funcs(adev);
gmc_v10_0_set_irq_funcs(adev);
+ gmc_v10_0_set_umc_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -685,6 +679,10 @@ static int gmc_v10_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_gmc_ras_late_init(adev);
+ if (r)
+ return r;
+
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -693,11 +691,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- base = gfxhub_v2_1_get_fb_location(adev);
- else
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = adev->gfxhub.funcs->get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -706,11 +700,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
/* base offset of vram pages */
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
- else
- adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
/* add the xgmi offset of the physical node */
adev->vm_manager.vram_base_offset +=
@@ -789,48 +779,14 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
-{
- u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- unsigned size;
-
- if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
- } else {
- u32 viewport;
- u32 pitch;
-
- viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
- pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
- size = (REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
- REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
- 4);
- }
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
- DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
- be aware of gart table overwrite\n");
- return 0;
- }
-
- return size;
-}
-
-
-
static int gmc_v10_0_sw_init(void *handle)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_init(adev);
- else
- gfxhub_v2_0_init(adev);
+ adev->gfxhub.funcs->init(adev);
- mmhub_v2_0_init(adev);
+ adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -878,6 +834,14 @@ static int gmc_v10_0_sw_init(void *handle)
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+ }
+
/*
* Set the internal MC address mask. This is the max address of the GPU's
* internal address space.
@@ -891,7 +855,7 @@ static int gmc_v10_0_sw_init(void *handle)
}
if (adev->gmc.xgmi.supported) {
- r = gfxhub_v2_1_get_xgmi_info(adev);
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
}
@@ -900,7 +864,7 @@ static int gmc_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -983,15 +947,11 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- r = gfxhub_v2_1_gart_enable(adev);
- else
- r = gfxhub_v2_0_gart_enable(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
- r = mmhub_v2_0_gart_enable(adev);
+ r = adev->mmhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1008,12 +968,8 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_set_fault_enable_default(adev, value);
- else
- gfxhub_v2_0_set_fault_enable_default(adev, value);
- mmhub_v2_0_set_fault_enable_default(adev, value);
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
@@ -1038,6 +994,9 @@ static int gmc_v10_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
+
return 0;
}
@@ -1050,12 +1009,8 @@ static int gmc_v10_0_hw_init(void *handle)
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_gart_disable(adev);
- else
- gfxhub_v2_0_gart_disable(adev);
- mmhub_v2_0_gart_disable(adev);
+ adev->gfxhub.funcs->gart_disable(adev);
+ adev->mmhub.funcs->gart_disable(adev);
amdgpu_gart_table_vram_unpin(adev);
}
@@ -1069,6 +1024,7 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v10_0_gart_disable(adev);
@@ -1121,7 +1077,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mmhub_v2_0_set_clockgating(adev, state);
+ r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
@@ -1136,7 +1092,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mmhub_v2_0_get_clockgating(adev, flags);
+ adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
adev->asic_type == CHIP_NAVY_FLOUNDER)
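
Both TLB-flush paths in gmc_v10_0 stop testing the racy adev->in_gpu_reset flag: the plain check becomes amdgpu_in_reset(adev), and the KIQ fast path now runs only while holding the read side of adev->reset_sem, so a concurrent reset (which takes the write side) excludes it cleanly instead of racing a flag. A user-space analogue with a pthread rwlock (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

    static void flush_tlb_via_kiq(void)
    {
        /* Analogue of down_read_trylock(&adev->reset_sem): only touch
         * the KIQ ring if no reset (write-side holder) is in flight. */
        if (pthread_rwlock_tryrdlock(&reset_sem) == 0) {
            printf("KIQ register write/wait path\n");
            pthread_rwlock_unlock(&reset_sem); /* up_read() */
            return;
        }
        printf("fallback: direct MMIO flush\n");
    }

    int main(void)
    {
        flush_tlb_via_kiq();
        return 0;
    }

Note that the gmc_v9_0 hunks further down drop the lock before every return in the pasid-flush path for the same reason: a leaked read side would block the next reset.
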
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 538e7ee35cdf..95a9117e9564 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -805,16 +805,13 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
return size;
}
@@ -862,7 +859,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
r = amdgpu_bo_init(adev);
if (r)
@@ -1136,6 +1133,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte = gmc_v6_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
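
Every GMC generation in this diff replaces the literal 9 * 1024 * 1024 with AMDGPU_VBIOS_VGA_ALLOCATION. The macro's definition is not shown here; assuming it keeps the value it replaces (8 MB for the VGA emulator plus 1 MB of framebuffer, per the deleted comment), the sizing logic reduces to:

    #include <stdio.h>

    /* Assumed to match the literal it replaces; the real definition
     * lives outside this diff (presumably amdgpu_gmc.h). */
    #define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024)

    int main(void)
    {
        int vga_mode_enabled = 0;  /* D1VGA_CONTROL.D1VGA_MODE_ENABLE */
        unsigned int size;

        if (vga_mode_enabled) {
            size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
            /* Otherwise size the pre-OS framebuffer from the scanout
             * viewport: height * width * 4 bytes per pixel. */
            unsigned int height = 1080, width = 1920;

            size = height * width * 4;
        }
        printf("pre-OS reservation: %u bytes\n", size);
        return 0;
    }
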
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index e18296dc1386..80c146df338a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -434,7 +434,7 @@ static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid;
unsigned int tmp;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
for (vmid = 1; vmid < 16; vmid++) {
@@ -970,16 +970,14 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
+
return size;
}
@@ -1035,7 +1033,7 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1372,7 +1370,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pde = gmc_v7_0_get_vm_pde,
- .get_vm_pte = gmc_v7_0_get_vm_pte
+ .get_vm_pte = gmc_v7_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a9e722b8a458..9ab65ca7df77 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -635,7 +635,7 @@ static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid;
unsigned int tmp;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
for (vmid = 1; vmid < 16; vmid++) {
@@ -1087,16 +1087,14 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
+
return size;
}
@@ -1160,7 +1158,7 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1739,7 +1737,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pde = gmc_v8_0_get_vm_pde,
- .get_vm_pte = gmc_v8_0_get_vm_pte
+ .get_vm_pte = gmc_v8_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
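
The per-ASIC get_vbios_fb_size() helpers also lose their trailing "return 0 if the pre-OS buffer uses up most of vram" clamp, and every sw_init switches from assigning adev->gmc.stolen_size directly to calling amdgpu_gmc_get_vbios_allocations(), which now consumes the new .get_vbios_fb_size callback. Presumably the clamp moved into that common helper; a sketch under that assumption, with the 8 MB floor taken from the deleted lines:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the clamp presumably centralized in
     * amdgpu_gmc_get_vbios_allocations(): keep at least 8 MB of VRAM
     * free, otherwise reserve nothing. */
    static uint64_t clamp_stolen(uint64_t real_vram, uint64_t size)
    {
        if (real_vram - size < 8ull * 1024 * 1024)
            return 0; /* pre-OS buffer eats most of VRAM */
        return size;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)
               clamp_stolen(256ull << 20, 250ull << 20)); /* 0 */
        printf("%llu\n", (unsigned long long)
               clamp_stolen(8ull << 30, 9ull << 20));     /* 9437184 */
        return 0;
    }
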
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6e4f3ff4810f..3ebbddb63705 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -67,6 +67,221 @@
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+
+
+static const char *gfxhub_client_ids[] = {
+ "CB",
+ "DB",
+ "IA",
+ "WD",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "PA",
+};
+
+static const char *mmhub_client_ids_raven[][2] = {
+ [0][0] = "MP1",
+ [1][0] = "MP0",
+ [2][0] = "VCN",
+ [3][0] = "VCNU",
+ [4][0] = "HDP",
+ [5][0] = "DCE",
+ [13][0] = "UTCL2",
+ [19][0] = "TLS",
+ [26][0] = "OSS",
+ [27][0] = "SDMA0",
+ [0][1] = "MP1",
+ [1][1] = "MP0",
+ [2][1] = "VCN",
+ [3][1] = "VCNU",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [6][1] = "DBGU0",
+ [7][1] = "DCE",
+ [8][1] = "DCEDWB0",
+ [9][1] = "DCEDWB1",
+ [26][1] = "OSS",
+ [27][1] = "SDMA0",
+};
+
+static const char *mmhub_client_ids_renoir[][2] = {
+ [0][0] = "MP1",
+ [1][0] = "MP0",
+ [2][0] = "HDP",
+ [4][0] = "DCEDMC",
+ [5][0] = "DCEVGA",
+ [13][0] = "UTCL2",
+ [19][0] = "TLS",
+ [26][0] = "OSS",
+ [27][0] = "SDMA0",
+ [28][0] = "VCN",
+ [29][0] = "VCNU",
+ [30][0] = "JPEG",
+ [0][1] = "MP1",
+ [1][1] = "MP0",
+ [2][1] = "HDP",
+ [3][1] = "XDP",
+ [6][1] = "DBGU0",
+ [7][1] = "DCEDMC",
+ [8][1] = "DCEVGA",
+ [9][1] = "DCEDWB",
+ [26][1] = "OSS",
+ [27][1] = "SDMA0",
+ [28][1] = "VCN",
+ [29][1] = "VCNU",
+ [30][1] = "JPEG",
+};
+
+static const char *mmhub_client_ids_vega10[][2] = {
+ [0][0] = "MP0",
+ [1][0] = "UVD",
+ [2][0] = "UVDU",
+ [3][0] = "HDP",
+ [13][0] = "UTCL2",
+ [14][0] = "OSS",
+ [15][0] = "SDMA1",
+ [32+0][0] = "VCE0",
+ [32+1][0] = "VCE0U",
+ [32+2][0] = "XDMA",
+ [32+3][0] = "DCE",
+ [32+4][0] = "MP1",
+ [32+14][0] = "SDMA0",
+ [0][1] = "MP0",
+ [1][1] = "UVD",
+ [2][1] = "UVDU",
+ [3][1] = "DBGU0",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [14][1] = "OSS",
+ [15][1] = "SDMA0",
+ [32+0][1] = "VCE0",
+ [32+1][1] = "VCE0U",
+ [32+2][1] = "XDMA",
+ [32+3][1] = "DCE",
+ [32+4][1] = "DCEDWB",
+ [32+5][1] = "MP1",
+ [32+6][1] = "DBGU1",
+ [32+14][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega12[][2] = {
+ [0][0] = "MP0",
+ [1][0] = "VCE0",
+ [2][0] = "VCE0U",
+ [3][0] = "HDP",
+ [13][0] = "UTCL2",
+ [14][0] = "OSS",
+ [15][0] = "SDMA1",
+ [32+0][0] = "DCE",
+ [32+1][0] = "XDMA",
+ [32+2][0] = "UVD",
+ [32+3][0] = "UVDU",
+ [32+4][0] = "MP1",
+ [32+15][0] = "SDMA0",
+ [0][1] = "MP0",
+ [1][1] = "VCE0",
+ [2][1] = "VCE0U",
+ [3][1] = "DBGU0",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [14][1] = "OSS",
+ [15][1] = "SDMA0",
+ [32+0][1] = "DCE",
+ [32+1][1] = "DCEDWB",
+ [32+2][1] = "XDMA",
+ [32+3][1] = "UVD",
+ [32+4][1] = "UVDU",
+ [32+5][1] = "MP1",
+ [32+6][1] = "DBGU1",
+ [32+15][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega20[][2] = {
+ [0][0] = "XDMA",
+ [1][0] = "DCE",
+ [2][0] = "VCE0",
+ [3][0] = "VCE0U",
+ [4][0] = "UVD",
+ [5][0] = "UVD1U",
+ [13][0] = "OSS",
+ [14][0] = "HDP",
+ [15][0] = "SDMA0",
+ [32+0][0] = "UVD",
+ [32+1][0] = "UVDU",
+ [32+2][0] = "MP1",
+ [32+3][0] = "MP0",
+ [32+12][0] = "UTCL2",
+ [32+14][0] = "SDMA1",
+ [0][1] = "XDMA",
+ [1][1] = "DCE",
+ [2][1] = "DCEDWB",
+ [3][1] = "VCE0",
+ [4][1] = "VCE0U",
+ [5][1] = "UVD1",
+ [6][1] = "UVD1U",
+ [7][1] = "DBGU0",
+ [8][1] = "XDP",
+ [13][1] = "OSS",
+ [14][1] = "HDP",
+ [15][1] = "SDMA0",
+ [32+0][1] = "UVD",
+ [32+1][1] = "UVDU",
+ [32+2][1] = "DBGU1",
+ [32+3][1] = "MP1",
+ [32+4][1] = "MP0",
+ [32+14][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_arcturus[][2] = {
+ [2][0] = "MP1",
+ [3][0] = "MP0",
+ [10][0] = "UTCL2",
+ [13][0] = "OSS",
+ [14][0] = "HDP",
+ [15][0] = "SDMA0",
+ [32+15][0] = "SDMA1",
+ [64+15][0] = "SDMA2",
+ [96+15][0] = "SDMA3",
+ [128+15][0] = "SDMA4",
+ [160+11][0] = "JPEG",
+ [160+12][0] = "VCN",
+ [160+13][0] = "VCNU",
+ [160+15][0] = "SDMA5",
+ [192+10][0] = "UTCL2",
+ [192+11][0] = "JPEG1",
+ [192+12][0] = "VCN1",
+ [192+13][0] = "VCN1U",
+ [192+15][0] = "SDMA6",
+ [224+15][0] = "SDMA7",
+ [0][1] = "DBGU1",
+ [1][1] = "XDP",
+ [2][1] = "MP1",
+ [3][1] = "MP0",
+ [13][1] = "OSS",
+ [14][1] = "HDP",
+ [15][1] = "SDMA0",
+ [32+15][1] = "SDMA1",
+ [64+15][1] = "SDMA2",
+ [96+15][1] = "SDMA3",
+ [128+15][1] = "SDMA4",
+ [160+11][1] = "JPEG",
+ [160+12][1] = "VCN",
+ [160+13][1] = "VCNU",
+ [160+15][1] = "SDMA5",
+ [192+11][1] = "JPEG1",
+ [192+12][1] = "VCN1",
+ [192+13][1] = "VCN1U",
+ [192+15][1] = "SDMA6",
+ [224+15][1] = "SDMA7",
+};
static const u32 golden_settings_vega10_hdp[] =
{
@@ -300,9 +515,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
{
struct amdgpu_vmhub *hub;
bool retry_fault = !!(entry->src_data[1] & 0x80);
- uint32_t status = 0;
+ uint32_t status = 0, cid = 0, rw = 0;
u64 addr;
char hub_name[10];
+ const char *mmhub_cid;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
@@ -337,6 +553,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
+ cid = REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, CID);
+ rw = REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, RW);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
@@ -359,9 +579,37 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
- REG_GET_FIELD(status,
- VM_L2_PROTECTION_FAULT_STATUS, CID));
+ if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ mmhub_cid = mmhub_client_ids_vega10[cid][rw];
+ break;
+ case CHIP_VEGA12:
+ mmhub_cid = mmhub_client_ids_vega12[cid][rw];
+ break;
+ case CHIP_VEGA20:
+ mmhub_cid = mmhub_client_ids_vega20[cid][rw];
+ break;
+ case CHIP_ARCTURUS:
+ mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
+ break;
+ case CHIP_RAVEN:
+ mmhub_cid = mmhub_client_ids_raven[cid][rw];
+ break;
+ case CHIP_RENOIR:
+ mmhub_cid = mmhub_client_ids_renoir[cid][rw];
+ break;
+ default:
+ mmhub_cid = NULL;
+ break;
+ }
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ mmhub_cid ? mmhub_cid : "unknown", cid);
+ }
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -374,10 +622,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
- dev_err(adev->dev, "\t RW: 0x%lx\n",
- REG_GET_FIELD(status,
- VM_L2_PROTECTION_FAULT_STATUS, RW));
-
+ dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}
}
@@ -500,13 +745,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* as GFXOFF under bare metal
*/
if (adev->gfx.kiq.ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- !adev->in_gpu_reset) {
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ down_read_trylock(&adev->reset_sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
+ up_read(&adev->reset_sem);
return;
}
@@ -596,10 +842,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
- if (ring->sched.ready) {
+ if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
@@ -626,6 +872,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
+ up_read(&adev->reset_sem);
return -ETIME;
}
@@ -633,10 +880,11 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+ up_read(&adev->reset_sem);
return -ETIME;
}
-
+ up_read(&adev->reset_sem);
return 0;
}
@@ -826,6 +1074,41 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SNOOPED;
}
+static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+ 4);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ default:
+ viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ break;
+ }
+ }
+
+ return size;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -833,7 +1116,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
- .get_vm_pte = gmc_v9_0_get_vm_pte
+ .get_vm_pte = gmc_v9_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -871,13 +1155,24 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
- case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ break;
+ default:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
+ }
+}
+
+static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
case CHIP_ARCTURUS:
- adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ case CHIP_VEGA20:
+ adev->gfxhub.funcs = &gfxhub_v1_1_funcs;
break;
default:
+ adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
break;
}
}
@@ -890,6 +1185,7 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_irq_funcs(adev);
gmc_v9_0_set_umc_funcs(adev);
gmc_v9_0_set_mmhub_funcs(adev);
+ gmc_v9_0_set_gfxhub_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -901,57 +1197,26 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
-{
-
- /*
- * TODO:
- * Currently there is a bug where some memory client outside
- * of the driver writes to first 8M of VRAM on S3 resume,
- * this overrides GART which by default gets placed in first 8M and
- * causes VM_FAULTS once GTT is accessed.
- * Keep the stolen memory reservation until the while this is not solved.
- * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
- */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- return true;
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- default:
- return false;
- }
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ amdgpu_bo_late_init(adev);
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
- /* Check if ecc is available */
+
+ /*
+ * Workaround for a performance drop issue: the VBIOS enables partial
+ * writes while disabling HBM ECC for vega10.
+ */
if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
+ if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else
- DRM_INFO("ECC is active.\n");
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r)
- DRM_INFO("SRAM ECC is not present.\n");
- else
- DRM_INFO("SRAM ECC is active.\n");
+ }
}
if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
@@ -969,10 +1234,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (adev->asic_type == CHIP_ARCTURUS)
- base = mmhub_v9_4_get_fb_location(adev);
- else if (!amdgpu_sriov_vf(adev))
- base = mmhub_v1_0_get_fb_location(adev);
+ if (!amdgpu_sriov_vf(adev))
+ base = adev->mmhub.funcs->get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -980,7 +1243,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
- adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
/* XXX: add the xgmi offset of the physical node? */
adev->vm_manager.vram_base_offset +=
@@ -1015,7 +1278,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
- adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+ adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
@@ -1066,50 +1329,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves register values that may need to be
+ * restored upon resume.
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
- u32 d1vga_control;
- unsigned size;
-
- /*
- * TODO Remove once GART corruption is resolved
- * Check related code in gmc_v9_0_sw_fini
- * */
- if (gmc_v9_0_keep_stolen_memory(adev))
- return 9 * 1024 * 1024;
-
- d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
- } else {
- u32 viewport;
-
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
- size = (REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
- REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
- 4);
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- default:
- viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
- size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
- REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
- 4);
- break;
- }
- }
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
-
- return size;
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
static int gmc_v9_0_sw_init(void *handle)
@@ -1117,11 +1348,9 @@ static int gmc_v9_0_sw_init(void *handle)
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfxhub_v1_0_init(adev);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_init(adev);
- else
- mmhub_v1_0_init(adev);
+ adev->gfxhub.funcs->init(adev);
+
+ adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -1233,7 +1462,7 @@ static int gmc_v9_0_sw_init(void *handle)
adev->need_swiotlb = drm_need_swiotlb(44);
if (adev->gmc.xgmi.supported) {
- r = gfxhub_v1_1_get_xgmi_info(adev);
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
}
@@ -1242,7 +1471,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1268,21 +1497,18 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ gmc_v9_0_save_registers(adev);
+
return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *stolen_vga_buf;
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
-
- if (gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
-
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
amdgpu_gart_fini(adev);
@@ -1297,7 +1523,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_VEGA10:
if (amdgpu_sriov_vf(adev))
break;
- /* fall through */
+ fallthrough;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
@@ -1326,10 +1552,13 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*
* This restores register values, saved at suspend.
*/
-static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN)
- WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+ if (adev->asic_type == CHIP_RAVEN) {
+ WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+ WARN_ON(adev->gmc.sdpif_register !=
+ RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
+ }
}
/**
@@ -1349,14 +1578,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- r = gfxhub_v1_0_gart_enable(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
- if (adev->asic_type == CHIP_ARCTURUS)
- r = mmhub_v9_4_gart_enable(adev);
- else
- r = mmhub_v1_0_gart_enable(adev);
+ r = adev->mmhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1391,11 +1617,10 @@ static int gmc_v9_0_hw_init(void *handle)
golden_settings_vega10_hdp,
ARRAY_SIZE(golden_settings_vega10_hdp));
+ if (adev->mmhub.funcs->update_power_gating)
+ adev->mmhub.funcs->update_power_gating(adev, true);
+
switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
case CHIP_ARCTURUS:
WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
break;
@@ -1420,11 +1645,8 @@ static int gmc_v9_0_hw_init(void *handle)
value = true;
if (!amdgpu_sriov_vf(adev)) {
- gfxhub_v1_0_set_fault_enable_default(adev, value);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_fault_enable_default(adev, value);
- else
- mmhub_v1_0_set_fault_enable_default(adev, value);
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
@@ -1438,20 +1660,6 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
- * gmc_v9_0_save_registers - saves regs
- *
- * @adev: amdgpu_device pointer
- *
- * This saves potential register values that should be
- * restored upon resume
- */
-static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
-{
- if (adev->asic_type == CHIP_RAVEN)
- adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
-}
-
-/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1460,11 +1668,8 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
*/
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
- gfxhub_v1_0_gart_disable(adev);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_gart_disable(adev);
- else
- mmhub_v1_0_gart_disable(adev);
+ adev->gfxhub.funcs->gart_disable(adev);
+ adev->mmhub.funcs->gart_disable(adev);
amdgpu_gart_table_vram_unpin(adev);
}
@@ -1487,16 +1692,9 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v9_0_hw_fini(adev);
- if (r)
- return r;
-
- gmc_v9_0_save_registers(adev);
-
- return 0;
+ return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)
@@ -1504,7 +1702,6 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
@@ -1537,10 +1734,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_clockgating(adev, state);
- else
- mmhub_v1_0_set_clockgating(adev, state);
+ adev->mmhub.funcs->set_clockgating(adev, state);
athub_v1_0_set_clockgating(adev, state);
@@ -1551,10 +1745,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_get_clockgating(adev, flags);
- else
- mmhub_v1_0_get_clockgating(adev, flags);
+ adev->mmhub.funcs->get_clockgating(adev, flags);
athub_v1_0_get_clockgating(adev, flags);
}
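
The change running through the gmc_v9_0.c hunks above is mechanical: every "if (adev->asic_type == CHIP_ARCTURUS) mmhub_v9_4_*() else mmhub_v1_0_*()" branch becomes an indirect call through adev->gfxhub.funcs / adev->mmhub.funcs. Below is a compilable toy of that ops-table pattern; the hook names mirror the diff, but the struct layout and setup are illustrative only (the real struct amdgpu_gfxhub_funcs / struct amdgpu_mmhub_funcs tables carry more hooks and are selected during early init).

/* toy_mmhub_dispatch.c - illustrative sketch, not kernel code */
#include <stdio.h>

struct adev;				/* stands in for struct amdgpu_device */

struct mmhub_funcs {
	void (*init)(struct adev *adev);
	int (*gart_enable)(struct adev *adev);
};

static void mmhub_v1_0_init_toy(struct adev *adev)
{
	(void)adev;
	puts("mmhub v1.0 init");
}

static int mmhub_v1_0_gart_enable_toy(struct adev *adev)
{
	(void)adev;
	return 0;
}

static const struct mmhub_funcs mmhub_v1_0_funcs_toy = {
	.init = mmhub_v1_0_init_toy,
	.gart_enable = mmhub_v1_0_gart_enable_toy,
};

struct adev {
	const struct mmhub_funcs *mmhub_funcs;	/* chosen once, per ASIC */
};

int main(void)
{
	struct adev a = { .mmhub_funcs = &mmhub_v1_0_funcs_toy };

	a.mmhub_funcs->init(&a);	/* call sites no longer switch on asic_type */
	return a.mmhub_funcs->gart_enable(&a);
}
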
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index e0585e8c6c1b..c415c439f690 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
+
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev);
#endif
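
The header gains this prototype because gmc_v9_0_restore_registers() is no longer static: the save now happens once in sw_init, the suspend/resume hooks drop their save/restore calls, and the restore is evidently driven from outside this file on the resume path (the actual call site is not part of the hunks shown). A runnable toy of the save-once / restore-and-verify lifecycle, including the readback WARN the new restore adds; all names here are stand-ins:

/* toy_save_restore.c - illustrative sketch, not kernel code */
#include <stdio.h>

static unsigned int fake_sdpif_reg;	/* stands in for mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 */
static unsigned int saved_sdpif;

static void save_registers(void)	/* done once at sw_init in the new code */
{
	saved_sdpif = fake_sdpif_reg;
}

static void restore_registers(void)
{
	fake_sdpif_reg = saved_sdpif;
	/* the new code WARNs if the readback disagrees with what was written */
	if (fake_sdpif_reg != saved_sdpif)
		fprintf(stderr, "WARN: SDPIF restore did not stick\n");
}

int main(void)
{
	fake_sdpif_reg = 0x1234;
	save_registers();
	fake_sdpif_reg = 0;	/* pretend suspend clobbered the register */
	restore_registers();
	printf("restored: 0x%x\n", fake_sdpif_reg);
	return 0;
}
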
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index bc300283b6ab..c600b61b5f45 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -33,6 +33,7 @@
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = jpeg_v1_0_ring_begin_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
}
+
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ int cnt = 0;
+
+ mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+ if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
+ DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
+
+ for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+ if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
+ DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
+ }
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
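
The new begin_use hook serializes JPEG against VCN: it takes the shared vcn1_jpeg1_workaround mutex, drains the VCN decode and encode rings, then hands power/clock setup to vcn_v1_0_set_pg_for_begin_use(); the matching switch of .end_use to vcn_v1_0_ring_end_use implies the unlock happens there. A compilable toy of that lock-bracketed begin/end pattern, with pthreads standing in for the kernel mutex and everything else illustrative:

/* toy_ring_bracket.c - illustrative sketch, not kernel code */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vcn1_jpeg1_workaround = PTHREAD_MUTEX_INITIALIZER;

static void ring_begin_use(void)
{
	pthread_mutex_lock(&vcn1_jpeg1_workaround);
	/* drain the sibling engine's outstanding fences here */
	puts("begin_use: sibling rings drained, lock held");
}

static void ring_end_use(void)
{
	/* re-arm the delayed idle work here */
	pthread_mutex_unlock(&vcn1_jpeg1_workaround);
}

int main(void)
{
	ring_begin_use();
	/* ... submit to the JPEG ring ... */
	ring_end_use();
	return 0;
}
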
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 7a51c615d22d..845306f63cdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -55,22 +55,18 @@ static int amdgpu_ih_clientid_jpeg[] = {
static int jpeg_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
- harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->jpeg.harvest_config |= 1 << i;
- }
+ u32 harvest;
+ int i;
- if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
- AMDGPU_JPEG_HARVEST_JPEG1))
- return -ENOENT;
- } else
- adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->jpeg.harvest_config |= 1 << i;
+ }
+ if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+ AMDGPU_JPEG_HARVEST_JPEG1))
+ return -ENOENT;
jpeg_v2_5_set_dec_ring_funcs(adev);
jpeg_v2_5_set_irq_funcs(adev);
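
With the non-Arcturus branch gone, jpeg_v2_5 evidently services only Arcturus, so early_init can read the per-instance harvest fuse unconditionally, set one bit per disabled instance in harvest_config, and fail with -ENOENT only when every instance is fused off. A runnable toy of the bitmask accounting (register reads stubbed out; the disable-bit value is illustrative, not the real CC_UVD_HARVESTING field):

/* toy_harvest_mask.c - illustrative sketch, not kernel code */
#include <errno.h>
#include <stdio.h>

#define MAX_INST 2
#define UVD_DISABLE_MASK 0x2	/* illustrative bit position */

static unsigned int cc_uvd_harvesting[MAX_INST] = { 0x0, 0x2 };	/* inst 1 fused off */

int main(void)
{
	unsigned int harvest_config = 0;

	for (int i = 0; i < MAX_INST; i++)
		if (cc_uvd_harvesting[i] & UVD_DISABLE_MASK)
			harvest_config |= 1u << i;

	if (harvest_config == (1u << MAX_INST) - 1)	/* every instance harvested */
		return -ENOENT;

	printf("usable instance mask: 0x%x\n",
	       ~harvest_config & ((1u << MAX_INST) - 1));
	return 0;
}
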
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c41e5590a701..3a0dff53654d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -460,15 +460,10 @@ static bool jpeg_v3_0_is_idle(void *handle)
static int jpeg_v3_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
- ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
+ return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
- if (ret)
- return ret;
-
- return ret;
}
static int jpeg_v3_0_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
deleted file mode 100644
index 4b3faaccecb9..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ /dev/null
@@ -1,3382 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "cikd.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "amdgpu_dpm.h"
-#include "kv_dpm.h"
-#include "gfx_v7_0.h"
-#include <linux/seq_file.h>
-
-#include "smu/smu_7_0_0_d.h"
-#include "smu/smu_7_0_0_sh_mask.h"
-
-#include "gca/gfx_7_2_d.h"
-#include "gca/gfx_7_2_sh_mask.h"
-
-#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define KV_MINIMUM_ENGINE_CLOCK 800
-#define SMC_RAM_END 0x40000
-
-static const struct amd_pm_funcs kv_dpm_funcs;
-
-static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
-static int kv_enable_nb_dpm(struct amdgpu_device *adev,
- bool enable);
-static void kv_init_graphics_levels(struct amdgpu_device *adev);
-static int kv_calculate_ds_divider(struct amdgpu_device *adev);
-static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
-static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
-static void kv_enable_new_levels(struct amdgpu_device *adev);
-static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps);
-static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
-static int kv_set_enabled_levels(struct amdgpu_device *adev);
-static int kv_force_dpm_highest(struct amdgpu_device *adev);
-static int kv_force_dpm_lowest(struct amdgpu_device *adev);
-static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps);
-static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp);
-static int kv_init_fps_limits(struct amdgpu_device *adev);
-
-static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
-static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
-
-
-static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- u32 vid_2bit)
-{
- struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 i;
-
- if (vddc_sclk_table && vddc_sclk_table->count) {
- if (vid_2bit < vddc_sclk_table->count)
- return vddc_sclk_table->entries[vid_2bit].v;
- else
- return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
- } else {
- for (i = 0; i < vid_mapping_table->num_entries; i++) {
- if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
- return vid_mapping_table->entries[i].vid_7bit;
- }
- return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
- }
-}
-
-static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- u32 vid_7bit)
-{
- struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 i;
-
- if (vddc_sclk_table && vddc_sclk_table->count) {
- for (i = 0; i < vddc_sclk_table->count; i++) {
- if (vddc_sclk_table->entries[i].v == vid_7bit)
- return i;
- }
- return vddc_sclk_table->count - 1;
- } else {
- for (i = 0; i < vid_mapping_table->num_entries; i++) {
- if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
- return vid_mapping_table->entries[i].vid_2bit;
- }
-
- return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
- }
-}
-
-static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
-{
-/* This bit selects who handles display phy powergating.
- * Clear the bit to let atom handle it.
- * Set it to let the driver handle it.
- * For now we just let atom handle it.
- */
-#if 0
- u32 v = RREG32(mmDOUT_SCRATCH3);
-
- if (enable)
- v |= 0x4;
- else
- v &= 0xFFFFFFFB;
-
- WREG32(mmDOUT_SCRATCH3, v);
-#endif
-}
-
-static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
- struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
- ATOM_AVAILABLE_SCLK_LIST *table)
-{
- u32 i;
- u32 n = 0;
- u32 prev_sclk = 0;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
- if (table[i].ulSupportedSCLK > prev_sclk) {
- sclk_voltage_mapping_table->entries[n].sclk_frequency =
- table[i].ulSupportedSCLK;
- sclk_voltage_mapping_table->entries[n].vid_2bit =
- table[i].usVoltageIndex;
- prev_sclk = table[i].ulSupportedSCLK;
- n++;
- }
- }
-
- sclk_voltage_mapping_table->num_max_dpm_entries = n;
-}
-
-static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
- struct sumo_vid_mapping_table *vid_mapping_table,
- ATOM_AVAILABLE_SCLK_LIST *table)
-{
- u32 i, j;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
- if (table[i].ulSupportedSCLK != 0) {
- vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
- table[i].usVoltageID;
- vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
- table[i].usVoltageIndex;
- }
- }
-
- for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
- if (vid_mapping_table->entries[i].vid_7bit == 0) {
- for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
- if (vid_mapping_table->entries[j].vid_7bit != 0) {
- vid_mapping_table->entries[i] =
- vid_mapping_table->entries[j];
- vid_mapping_table->entries[j].vid_7bit = 0;
- break;
- }
- }
-
- if (j == SUMO_MAX_NUMBER_VOLTAGES)
- break;
- }
- }
-
- vid_mapping_table->num_entries = i;
-}
-
-#if 0
-static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 1, 4, 1 },
- { 2, 5, 1 },
- { 3, 4, 2 },
- { 4, 1, 1 },
- { 5, 5, 2 },
- { 6, 6, 1 },
- { 7, 9, 2 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
-{
- { 0, 4, 1 },
- { 1, 4, 1 },
- { 2, 5, 1 },
- { 3, 4, 1 },
- { 4, 1, 1 },
- { 5, 5, 1 },
- { 6, 6, 1 },
- { 7, 9, 1 },
- { 8, 4, 1 },
- { 9, 2, 1 },
- { 10, 3, 1 },
- { 11, 6, 1 },
- { 12, 8, 2 },
- { 13, 1, 1 },
- { 14, 2, 1 },
- { 15, 3, 1 },
- { 16, 1, 1 },
- { 17, 4, 1 },
- { 18, 3, 1 },
- { 19, 1, 1 },
- { 20, 8, 1 },
- { 21, 5, 1 },
- { 22, 1, 1 },
- { 23, 1, 1 },
- { 24, 4, 1 },
- { 27, 6, 1 },
- { 28, 1, 1 },
- { 0xffffffff }
-};
-
-static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
-{
- { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
-{
- { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
-{
- { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
-{
- { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
-{
- { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-
-static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
-{
- { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
-};
-#endif
-
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
- { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
- { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
- { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
- { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
- { 0xFFFFFFFF }
-};
-
-static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
-{
- struct kv_ps *ps = rps->ps_priv;
-
- return ps;
-}
-
-static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-#if 0
-static void kv_program_local_cac_table(struct amdgpu_device *adev,
- const struct kv_lcac_config_values *local_cac_table,
- const struct kv_lcac_config_reg *local_cac_reg)
-{
- u32 i, count, data;
- const struct kv_lcac_config_values *values = local_cac_table;
-
- while (values->block_id != 0xffffffff) {
- count = values->signal_id;
- for (i = 0; i < count; i++) {
- data = ((values->block_id << local_cac_reg->block_shift) &
- local_cac_reg->block_mask);
- data |= ((i << local_cac_reg->signal_shift) &
- local_cac_reg->signal_mask);
- data |= ((values->t << local_cac_reg->t_shift) &
- local_cac_reg->t_mask);
- data |= ((1 << local_cac_reg->enable_shift) &
- local_cac_reg->enable_mask);
- WREG32_SMC(local_cac_reg->cntl, data);
- }
- values++;
- }
-}
-#endif
-
-static int kv_program_pt_config_registers(struct amdgpu_device *adev,
- const struct kv_pt_config_reg *cac_config_regs)
-{
- const struct kv_pt_config_reg *config_regs = cac_config_regs;
- u32 data;
- u32 cache = 0;
-
- if (config_regs == NULL)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == KV_CONFIGREG_CACHE) {
- cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- } else {
- switch (config_regs->type) {
- case KV_CONFIGREG_SMC_IND:
- data = RREG32_SMC(config_regs->offset);
- break;
- case KV_CONFIGREG_DIDT_IND:
- data = RREG32_DIDT(config_regs->offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- data |= cache;
- cache = 0;
-
- switch (config_regs->type) {
- case KV_CONFIGREG_SMC_IND:
- WREG32_SMC(config_regs->offset, data);
- break;
- case KV_CONFIGREG_DIDT_IND:
- WREG32_DIDT(config_regs->offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- }
- config_regs++;
- }
-
- return 0;
-}
-
-static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 data;
-
- if (pi->caps_sq_ramping) {
- data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
- if (enable)
- data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
- }
-
- if (pi->caps_db_ramping) {
- data = RREG32_DIDT(ixDIDT_DB_CTRL0);
- if (enable)
- data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_DB_CTRL0, data);
- }
-
- if (pi->caps_td_ramping) {
- data = RREG32_DIDT(ixDIDT_TD_CTRL0);
- if (enable)
- data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TD_CTRL0, data);
- }
-
- if (pi->caps_tcp_ramping) {
- data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
- if (enable)
- data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
- }
-}
-
-static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- if (pi->caps_sq_ramping ||
- pi->caps_db_ramping ||
- pi->caps_td_ramping ||
- pi->caps_tcp_ramping) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
-
- if (enable) {
- ret = kv_program_pt_config_registers(adev, didt_config_kv);
- if (ret) {
- amdgpu_gfx_rlc_exit_safe_mode(adev);
- return ret;
- }
- }
-
- kv_do_enable_didt(adev, enable);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev);
- }
-
- return 0;
-}
-
-#if 0
-static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->caps_cac) {
- WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
- kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
-
- WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
- kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
-
- WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
- WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
- kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
- }
-}
-#endif
-
-static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (pi->caps_cac) {
- if (enable) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
- if (ret)
- pi->cac_enabled = false;
- else
- pi->cac_enabled = true;
- } else if (pi->cac_enabled) {
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
- pi->cac_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int kv_process_firmware_header(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, DpmTable),
- &tmp, pi->sram_end);
-
- if (ret == 0)
- pi->dpm_table_start = tmp;
-
- ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, SoftRegisters),
- &tmp, pi->sram_end);
-
- if (ret == 0)
- pi->soft_regs_start = tmp;
-
- return ret;
-}
-
-static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_voltage_change_enable = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
- &pi->graphics_voltage_change_enable,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_set_dpm_interval(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
- &pi->graphics_interval,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
- &pi->graphics_boot_level,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static void kv_program_vc(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
-}
-
-static void kv_clear_vc(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
-}
-
-static int kv_set_divider_value(struct amdgpu_device *adev,
- u32 index, u32 sclk)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct atom_clock_dividers dividers;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- sclk, false, &dividers);
- if (ret)
- return ret;
-
- pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
- pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
-
- return 0;
-}
-
-static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
- u16 voltage)
-{
- return 6200 - (voltage * 25);
-}
-
-static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
- u32 vid_2bit)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
- &pi->sys_info.vid_mapping_table,
- vid_2bit);
-
- return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
-}
-
-
-static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
- pi->graphics_level[index].MinVddNb =
- cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
-
- return 0;
-}
-
-static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].AT = cpu_to_be16((u16)at);
-
- return 0;
-}
-
-static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
- u32 index, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
-}
-
-static void kv_start_dpm(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- amdgpu_kv_smc_dpm_enable(adev, true);
-}
-
-static void kv_stop_dpm(struct amdgpu_device *adev)
-{
- amdgpu_kv_smc_dpm_enable(adev, false);
-}
-
-static void kv_start_am(struct amdgpu_device *adev)
-{
- u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
- SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
-
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
-}
-
-static void kv_reset_am(struct amdgpu_device *adev)
-{
- u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
- SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
-
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
-}
-
-static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
-{
- return amdgpu_kv_notify_message_to_smu(adev, freeze ?
- PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
-}
-
-static int kv_force_lowest_valid(struct amdgpu_device *adev)
-{
- return kv_force_dpm_lowest(adev);
-}
-
-static int kv_unforce_levels(struct amdgpu_device *adev)
-{
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
- else
- return kv_set_enabled_levels(adev);
-}
-
-static int kv_update_sclk_t(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 low_sclk_interrupt_t = 0;
- int ret = 0;
-
- if (pi->caps_sclk_throttle_low_notification) {
- low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
- (u8 *)&low_sclk_interrupt_t,
- sizeof(u32), pi->sram_end);
- }
- return ret;
-}
-
-static int kv_program_bootup_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].clk == pi->boot_pl.sclk)
- break;
- }
-
- pi->graphics_boot_level = (u8)i;
- kv_dpm_power_level_enable(adev, i, true);
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- if (table->num_max_dpm_entries == 0)
- return -EINVAL;
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
- break;
- }
-
- pi->graphics_boot_level = (u8)i;
- kv_dpm_power_level_enable(adev, i, true);
- }
- return 0;
-}
-
-static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->graphics_therm_throttle_enable = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
- &pi->graphics_therm_throttle_enable,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static int kv_upload_dpm_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
- (u8 *)&pi->graphics_level,
- sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
- pi->sram_end);
-
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
- &pi->graphics_dpm_level_count,
- sizeof(u8), pi->sram_end);
-
- return ret;
-}
-
-static u32 kv_get_clock_difference(u32 a, u32 b)
-{
- return (a >= b) ? a - b : b - a;
-}
-
-static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 value;
-
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(clk, 40000) < 200)
- value = 3;
- else if (kv_get_clock_difference(clk, 30000) < 200)
- value = 2;
- else if (kv_get_clock_difference(clk, 20000) < 200)
- value = 7;
- else if (kv_get_clock_difference(clk, 15000) < 200)
- value = 6;
- else if (kv_get_clock_difference(clk, 10000) < 200)
- value = 8;
- else
- value = 0;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static int kv_populate_uvd_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->uvd_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t < table->entries[i].v))
- break;
-
- pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
- pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
- pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
-
- pi->uvd_level[i].VClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
- pi->uvd_level[i].DClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].vclk, false, &dividers);
- if (ret)
- return ret;
- pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].dclk, false, &dividers);
- if (ret)
- return ret;
- pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
-
- pi->uvd_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
- (u8 *)&pi->uvd_level_count,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- pi->uvd_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UVDInterval),
- &pi->uvd_interval,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdLevel),
- (u8 *)&pi->uvd_level,
- sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
- pi->sram_end);
-
- return ret;
-
-}
-
-static int kv_populate_vce_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
- u32 i;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->vce_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t < table->entries[i].v)
- break;
-
- pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
- pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- pi->vce_level[i].ClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].evclk, false, &dividers);
- if (ret)
- return ret;
- pi->vce_level[i].Divider = (u8)dividers.post_div;
-
- pi->vce_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
- (u8 *)&pi->vce_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->vce_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VCEInterval),
- (u8 *)&pi->vce_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceLevel),
- (u8 *)&pi->vce_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
- pi->sram_end);
-
- return ret;
-}
-
-static int kv_populate_samu_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->samu_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t < table->entries[i].v)
- break;
-
- pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
- pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- pi->samu_level[i].ClkBypassCntl =
- (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].clk, false, &dividers);
- if (ret)
- return ret;
- pi->samu_level[i].Divider = (u8)dividers.post_div;
-
- pi->samu_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
- (u8 *)&pi->samu_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->samu_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
- (u8 *)&pi->samu_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuLevel),
- (u8 *)&pi->samu_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
- pi->sram_end);
- if (ret)
- return ret;
-
- return ret;
-}
-
-
-static int kv_populate_acp_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- struct atom_clock_dividers dividers;
- int ret;
- u32 i;
-
- if (table == NULL || table->count == 0)
- return 0;
-
- pi->acp_level_count = 0;
- for (i = 0; i < table->count; i++) {
- pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
- pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- table->entries[i].clk, false, &dividers);
- if (ret)
- return ret;
- pi->acp_level[i].Divider = (u8)dividers.post_div;
-
- pi->acp_level_count++;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
- (u8 *)&pi->acp_level_count,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- pi->acp_interval = 1;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, ACPInterval),
- (u8 *)&pi->acp_interval,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpLevel),
- (u8 *)&pi->acp_level,
- sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
- pi->sram_end);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 3;
- else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 2;
- else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
- pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 8;
- else
- pi->graphics_level[i].ClkBypassCntl = 0;
- } else {
- pi->graphics_level[i].ClkBypassCntl = 0;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if (pi->caps_enable_dfs_bypass) {
- if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 3;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 2;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
- pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
- pi->graphics_level[i].ClkBypassCntl = 8;
- else
- pi->graphics_level[i].ClkBypassCntl = 0;
- } else {
- pi->graphics_level[i].ClkBypassCntl = 0;
- }
- }
- }
-}
-
-static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
-}
-
-static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->acp_boot_level = 0xff;
-}
-
-static void kv_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct kv_ps *new_ps = kv_get_ps(rps);
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->current_rps = *rps;
- pi->current_ps = *new_ps;
- pi->current_rps.ps_priv = &pi->current_ps;
- adev->pm.dpm.current_ps = &pi->current_rps;
-}
-
-static void kv_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct kv_ps *new_ps = kv_get_ps(rps);
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->requested_rps = *rps;
- pi->requested_ps = *new_ps;
- pi->requested_rps.ps_priv = &pi->requested_ps;
- adev->pm.dpm.requested_ps = &pi->requested_rps;
-}
-
-static void kv_dpm_enable_bapm(void *handle, bool enable)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, enable);
- if (ret)
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- }
-}
-
-static int kv_dpm_enable(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- ret = kv_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("kv_process_firmware_header failed\n");
- return ret;
- }
- kv_init_fps_limits(adev);
- kv_init_graphics_levels(adev);
- ret = kv_program_bootup_state(adev);
- if (ret) {
- DRM_ERROR("kv_program_bootup_state failed\n");
- return ret;
- }
- kv_calculate_dfs_bypass_settings(adev);
- ret = kv_upload_dpm_settings(adev);
- if (ret) {
- DRM_ERROR("kv_upload_dpm_settings failed\n");
- return ret;
- }
- ret = kv_populate_uvd_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_uvd_table failed\n");
- return ret;
- }
- ret = kv_populate_vce_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_vce_table failed\n");
- return ret;
- }
- ret = kv_populate_samu_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_samu_table failed\n");
- return ret;
- }
- ret = kv_populate_acp_table(adev);
- if (ret) {
- DRM_ERROR("kv_populate_acp_table failed\n");
- return ret;
- }
- kv_program_vc(adev);
-#if 0
- kv_initialize_hardware_cac_manager(adev);
-#endif
- kv_start_am(adev);
- if (pi->enable_auto_thermal_throttling) {
- ret = kv_enable_auto_thermal_throttling(adev);
- if (ret) {
- DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
- return ret;
- }
- }
- ret = kv_enable_dpm_voltage_scaling(adev);
- if (ret) {
- DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
- return ret;
- }
- ret = kv_set_dpm_interval(adev);
- if (ret) {
- DRM_ERROR("kv_set_dpm_interval failed\n");
- return ret;
- }
- ret = kv_set_dpm_boot_state(adev);
- if (ret) {
- DRM_ERROR("kv_set_dpm_boot_state failed\n");
- return ret;
- }
- ret = kv_enable_ulv(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_ulv failed\n");
- return ret;
- }
- kv_start_dpm(adev);
- ret = kv_enable_didt(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_didt failed\n");
- return ret;
- }
- ret = kv_enable_smc_cac(adev, true);
- if (ret) {
- DRM_ERROR("kv_enable_smc_cac failed\n");
- return ret;
- }
-
- kv_reset_acp_boot_level(adev);
-
- ret = amdgpu_kv_smc_bapm_enable(adev, false);
- if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- return ret;
- }
-
- if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
- ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("kv_set_thermal_temperature_range failed\n");
- return ret;
- }
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- }
-
- return ret;
-}
-
-static void kv_dpm_disable(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
- amdgpu_kv_smc_bapm_enable(adev, false);
-
- if (adev->asic_type == CHIP_MULLINS)
- kv_enable_nb_dpm(adev, false);
-
- /* powerup blocks */
- kv_dpm_powergate_acp(adev, false);
- kv_dpm_powergate_samu(adev, false);
- if (pi->caps_vce_pg) /* power on the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
- if (pi->caps_uvd_pg) /* power on the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
-
- kv_enable_smc_cac(adev, false);
- kv_enable_didt(adev, false);
- kv_clear_vc(adev);
- kv_stop_dpm(adev);
- kv_enable_ulv(adev, false);
- kv_reset_am(adev);
-
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-}
-
-#if 0
-static int kv_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
- (u8 *)&value, sizeof(u16), pi->sram_end);
-}
-
-static int kv_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-#endif
-
-static void kv_init_sclk_t(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->low_sclk_interrupt_t = 0;
-}
-
-static int kv_init_fps_limits(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (pi->caps_fps) {
- u16 tmp;
-
- tmp = 45;
- pi->fps_high_t = cpu_to_be16(tmp);
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, FpsHighT),
- (u8 *)&pi->fps_high_t,
- sizeof(u16), pi->sram_end);
-
- tmp = 30;
- pi->fps_low_t = cpu_to_be16(tmp);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, FpsLowT),
- (u8 *)&pi->fps_low_t,
- sizeof(u16), pi->sram_end);
-
- }
- return ret;
-}
-
-static void kv_init_powergate_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->uvd_power_gated = false;
- pi->vce_power_gated = false;
- pi->samu_power_gated = false;
- pi->acp_power_gated = false;
-
-}
-
-static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
-}
-
-static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
-}
-
-static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
-}
-
-static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
-{
- return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
-}
-
-static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_uvd_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- int ret;
- u32 mask;
-
- if (!gate) {
- if (table->count)
- pi->uvd_boot_level = table->count - 1;
- else
- pi->uvd_boot_level = 0;
-
- if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
- mask = 1 << pi->uvd_boot_level;
- } else {
- mask = 0x1f;
- }
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
- (uint8_t *)&pi->uvd_boot_level,
- sizeof(u8), pi->sram_end);
- if (ret)
- return ret;
-
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- mask);
- }
-
- return kv_enable_uvd_dpm(adev, !gate);
-}
-
-static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
-{
- u8 i;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= evclk)
- break;
- }
-
- return i;
-}
-
-static int kv_update_vce_dpm(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- int ret;
-
- if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
- if (pi->caps_stable_p_state)
- pi->vce_boot_level = table->count - 1;
- else
- pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
- (u8 *)&pi->vce_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (1 << pi->vce_boot_level));
- kv_enable_vce_dpm(adev, true);
- } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
- kv_enable_vce_dpm(adev, false);
- }
-
- return 0;
-}
-
-static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- int ret;
-
- if (!gate) {
- if (pi->caps_stable_p_state)
- pi->samu_boot_level = table->count - 1;
- else
- pi->samu_boot_level = 0;
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
- (u8 *)&pi->samu_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (1 << pi->samu_boot_level));
- }
-
- return kv_enable_samu_dpm(adev, !gate);
-}
-
-static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
-{
- u8 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].clk >= 0) /* XXX */
- break;
- }
-
- if (i >= table->count)
- i = table->count - 1;
-
- return i;
-}
-
-static void kv_update_acp_boot_level(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u8 acp_boot_level;
-
- if (!pi->caps_stable_p_state) {
- acp_boot_level = kv_get_acp_boot_level(adev);
- if (acp_boot_level != pi->acp_boot_level) {
- pi->acp_boot_level = acp_boot_level;
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (1 << pi->acp_boot_level));
- }
- }
-}
-
-static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- int ret;
-
- if (!gate) {
- if (pi->caps_stable_p_state)
- pi->acp_boot_level = table->count - 1;
- else
- pi->acp_boot_level = kv_get_acp_boot_level(adev);
-
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
- (u8 *)&pi->acp_boot_level,
- sizeof(u8),
- pi->sram_end);
- if (ret)
- return ret;
-
- if (pi->caps_stable_p_state)
- amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (1 << pi->acp_boot_level));
- }
-
- return kv_enable_acp_dpm(adev, !gate);
-}
-
-static void kv_dpm_powergate_uvd(void *handle, bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->uvd_power_gated = gate;
-
- if (gate) {
- /* stop the UVD block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- kv_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg)
- /* power off the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
- } else {
- if (pi->caps_uvd_pg)
- /* power on the UVD block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
- /* re-init the UVD block */
- kv_update_uvd_dpm(adev, gate);
-
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- }
-}
-
-static void kv_dpm_powergate_vce(void *handle, bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret;
-
- pi->vce_power_gated = gate;
-
- if (gate) {
- /* stop the VCE block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- kv_enable_vce_dpm(adev, false);
- if (pi->caps_vce_pg) /* power off the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- } else {
- if (pi->caps_vce_pg) /* power on the VCE block */
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
- kv_enable_vce_dpm(adev, true);
- /* re-init the VCE block */
- ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- }
-}
-
-
-static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->samu_power_gated == gate)
- return;
-
- pi->samu_power_gated = gate;
-
- if (gate) {
- kv_update_samu_dpm(adev, true);
- if (pi->caps_samu_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
- } else {
- if (pi->caps_samu_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
- kv_update_samu_dpm(adev, false);
- }
-}
-
-static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->acp_power_gated == gate)
- return;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return;
-
- pi->acp_power_gated = gate;
-
- if (gate) {
- kv_update_acp_dpm(adev, true);
- if (pi->caps_acp_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
- } else {
- if (pi->caps_acp_pg)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
- kv_update_acp_dpm(adev, false);
- }
-}
-
-static void kv_set_valid_clock_range(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- for (i = 0; i < pi->graphics_dpm_level_count; i++) {
- if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
- (i == (pi->graphics_dpm_level_count - 1))) {
- pi->lowest_valid = i;
- break;
- }
- }
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
- break;
- }
- pi->highest_valid = i;
-
- if (pi->lowest_valid > pi->highest_valid) {
- if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
- (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
- pi->highest_valid = pi->lowest_valid;
- else
- pi->lowest_valid = pi->highest_valid;
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
- if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
- i == (int)(pi->graphics_dpm_level_count - 1)) {
- pi->lowest_valid = i;
- break;
- }
- }
-
- for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
- if (table->entries[i].sclk_frequency <=
- new_ps->levels[new_ps->num_levels - 1].sclk)
- break;
- }
- pi->highest_valid = i;
-
- if (pi->lowest_valid > pi->highest_valid) {
- if ((new_ps->levels[0].sclk -
- table->entries[pi->highest_valid].sclk_frequency) >
- (table->entries[pi->lowest_valid].sclk_frequency -
- new_ps->levels[new_ps->num_levels -1].sclk))
- pi->highest_valid = pi->lowest_valid;
- else
- pi->lowest_valid = pi->highest_valid;
- }
- }
-}
-
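/*
 * A standalone sketch of the window selection in kv_set_valid_clock_range()
 * above: lowest_valid is the first DPM level whose table clock reaches the
 * state's minimum sclk, highest_valid the last level at or below its
 * maximum, and an inverted window collapses to whichever edge sits closer.
 * Table values and units (10 kHz) here are illustrative, not from a real
 * VBIOS.
 */
#include <stdio.h>
#include <stdint.h>

static void clock_window(const uint32_t *clk, uint32_t n,
			 uint32_t min_sclk, uint32_t max_sclk,
			 uint32_t *lowest, uint32_t *highest)
{
	uint32_t i;

	for (i = 0; i < n; i++) {
		if (clk[i] >= min_sclk || i == n - 1) {
			*lowest = i;
			break;
		}
	}

	for (i = n - 1; i > 0; i--)
		if (clk[i] <= max_sclk)
			break;
	*highest = i;

	/* Same tie-break as the driver: keep the nearer edge. */
	if (*lowest > *highest) {
		if (min_sclk - clk[*highest] > clk[*lowest] - max_sclk)
			*highest = *lowest;
		else
			*lowest = *highest;
	}
}

int main(void)
{
	const uint32_t clk[] = { 20000, 30000, 40000, 60000 };
	uint32_t lo, hi;

	clock_window(clk, 4, 30000, 60000, &lo, &hi);
	printf("window: %u..%u\n", lo, hi);	/* window: 1..3 */
	return 0;
}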
-static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
- u8 clk_bypass_cntl;
-
- if (pi->caps_enable_dfs_bypass) {
- clk_bypass_cntl = new_ps->need_dfs_bypass ?
- pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
- ret = amdgpu_kv_copy_bytes_to_smc(adev,
- (pi->dpm_table_start +
- offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
- (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
- offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
- &clk_bypass_cntl,
- sizeof(u8), pi->sram_end);
- }
-
- return ret;
-}
-
-static int kv_enable_nb_dpm(struct amdgpu_device *adev,
- bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- int ret = 0;
-
- if (enable) {
- if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
- if (ret == 0)
- pi->nb_dpm_enabled = true;
- }
- } else {
- if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
- if (ret == 0)
- pi->nb_dpm_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int kv_dpm_force_performance_level(void *handle,
- enum amd_dpm_forced_level level)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- ret = kv_force_dpm_highest(adev);
- if (ret)
- return ret;
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- ret = kv_force_dpm_lowest(adev);
- if (ret)
- return ret;
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- ret = kv_unforce_levels(adev);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-static int kv_dpm_pre_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- kv_update_requested_ps(adev, new_ps);
-
- kv_apply_state_adjust_rules(adev,
- &pi->requested_rps,
- &pi->current_rps);
-
- return 0;
-}
-
-static int kv_dpm_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
- struct amdgpu_ps *old_ps = &pi->current_rps;
- int ret;
-
- if (pi->bapm_enable) {
- ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
- if (ret) {
- DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
- return ret;
- }
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- if (pi->enable_dpm) {
- kv_set_valid_clock_range(adev, new_ps);
- kv_update_dfs_bypass_settings(adev, new_ps);
- ret = kv_calculate_ds_divider(adev);
- if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
- return ret;
- }
- kv_calculate_nbps_level_settings(adev);
- kv_calculate_dpm_settings(adev);
- kv_force_lowest_valid(adev);
- kv_enable_new_levels(adev);
- kv_upload_dpm_settings(adev);
- kv_program_nbps_index_settings(adev, new_ps);
- kv_unforce_levels(adev);
- kv_set_enabled_levels(adev);
- kv_force_lowest_valid(adev);
- kv_unforce_levels(adev);
-
- ret = kv_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
- return ret;
- }
- kv_update_sclk_t(adev);
- if (adev->asic_type == CHIP_MULLINS)
- kv_enable_nb_dpm(adev, true);
- }
- } else {
- if (pi->enable_dpm) {
- kv_set_valid_clock_range(adev, new_ps);
- kv_update_dfs_bypass_settings(adev, new_ps);
- ret = kv_calculate_ds_divider(adev);
- if (ret) {
- DRM_ERROR("kv_calculate_ds_divider failed\n");
- return ret;
- }
- kv_calculate_nbps_level_settings(adev);
- kv_calculate_dpm_settings(adev);
- kv_freeze_sclk_dpm(adev, true);
- kv_upload_dpm_settings(adev);
- kv_program_nbps_index_settings(adev, new_ps);
- kv_freeze_sclk_dpm(adev, false);
- kv_set_enabled_levels(adev);
- ret = kv_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("kv_update_vce_dpm failed\n");
- return ret;
- }
- kv_update_acp_boot_level(adev);
- kv_update_sclk_t(adev);
- kv_enable_nb_dpm(adev, true);
- }
- }
-
- return 0;
-}
-
-static void kv_dpm_post_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
-
- kv_update_current_ps(adev, new_ps);
-}
-
-static void kv_dpm_setup_asic(struct amdgpu_device *adev)
-{
- sumo_take_smu_control(adev, true);
- kv_init_powergate_state(adev);
- kv_init_sclk_t(adev);
-}
-
-#if 0
-static void kv_dpm_reset_asic(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- kv_force_lowest_valid(adev);
- kv_init_graphics_levels(adev);
- kv_program_bootup_state(adev);
- kv_upload_dpm_settings(adev);
- kv_force_lowest_valid(adev);
- kv_unforce_levels(adev);
- } else {
- kv_init_graphics_levels(adev);
- kv_program_bootup_state(adev);
- kv_freeze_sclk_dpm(adev, true);
- kv_upload_dpm_settings(adev);
- kv_freeze_sclk_dpm(adev, false);
- kv_set_enabled_level(adev, pi->graphics_boot_level);
- }
-}
-#endif
-
-static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
- struct amdgpu_clock_and_voltage_limits *table)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
- int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
- table->sclk =
- pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
- table->vddc =
- kv_convert_2bit_index_to_voltage(adev,
- pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
- }
-
- table->mclk = pi->sys_info.nbp_memory_clock[0];
-}
-
-static void kv_patch_voltage_values(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *samu_table =
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table *acp_table =
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
-
- if (uvd_table->count) {
- for (i = 0; i < uvd_table->count; i++)
- uvd_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- uvd_table->entries[i].v);
- }
-
- if (vce_table->count) {
- for (i = 0; i < vce_table->count; i++)
- vce_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- vce_table->entries[i].v);
- }
-
- if (samu_table->count) {
- for (i = 0; i < samu_table->count; i++)
- samu_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- samu_table->entries[i].v);
- }
-
- if (acp_table->count) {
- for (i = 0; i < acp_table->count; i++)
- acp_table->entries[i].v =
- kv_convert_8bit_index_to_voltage(adev,
- acp_table->entries[i].v);
- }
-
-}
-
-static void kv_construct_boot_state(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
- pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
- pi->boot_pl.ds_divider_index = 0;
- pi->boot_pl.ss_divider_index = 0;
- pi->boot_pl.allow_gnb_slow = 1;
- pi->boot_pl.force_nbp_state = 0;
- pi->boot_pl.display_wm = 0;
- pi->boot_pl.vce_wm = 0;
-}
-
-static int kv_force_dpm_highest(struct amdgpu_device *adev)
-{
- int ret;
- u32 enable_mask, i;
-
- ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
- if (ret)
- return ret;
-
- for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
- if (enable_mask & (1 << i))
- break;
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
- else
- return kv_set_enabled_level(adev, i);
-}
-
-static int kv_force_dpm_lowest(struct amdgpu_device *adev)
-{
- int ret;
- u32 enable_mask, i;
-
- ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
- if (ret)
- return ret;
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
- if (enable_mask & (1 << i))
- break;
- }
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
- else
- return kv_set_enabled_level(adev, i);
-}
-
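/*
 * kv_force_dpm_highest()/kv_force_dpm_lowest() above reduce to a bit scan
 * over the SMU's enabled-level mask.  A minimal model of the two loops;
 * the 8-level bound mirrors SMU7_MAX_LEVELS_GRAPHICS from the SMU headers
 * (treat the value here as an assumption).
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVELS 8

static uint32_t highest_set(uint32_t mask)
{
	uint32_t i;

	for (i = MAX_LEVELS - 1; i > 0; i--)
		if (mask & (1u << i))
			break;
	return i;
}

static uint32_t lowest_set(uint32_t mask)
{
	uint32_t i;

	for (i = 0; i < MAX_LEVELS; i++)
		if (mask & (1u << i))
			break;
	return i;
}

int main(void)
{
	uint32_t mask = 0x16;	/* levels 1, 2 and 4 enabled */

	printf("%u %u\n", lowest_set(mask), highest_set(mask));	/* 1 4 */
	return 0;
}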
-static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
- u32 sclk, u32 min_sclk_in_sr)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- u32 temp;
- u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
-
- if (sclk < min)
- return 0;
-
- if (!pi->caps_sclk_ds)
- return 0;
-
- for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
- temp = sclk >> i;
- if (temp >= min)
- break;
- }
-
- return (u8)i;
-}
-
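/*
 * Worked example for kv_get_sleep_divider_id_from_clock() above: the SMU
 * divides sclk by 2^id in deep sleep, and the scan picks the largest id
 * that keeps the divided clock at or above the floor.  The constants below
 * stand in for KV_MAX_DEEPSLEEP_DIVIDER_ID and KV_MINIMUM_ENGINE_CLOCK,
 * defined earlier in kv_dpm.c; clocks are in the driver's 10 kHz units.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_DIVIDER_ID	5
#define MIN_ENGINE_CLK	800	/* 8 MHz */

static uint8_t sleep_divider_id(uint32_t sclk, uint32_t floor)
{
	uint32_t i;

	if (sclk < floor)
		return 0;

	for (i = MAX_DIVIDER_ID; i > 0; i--)
		if ((sclk >> i) >= floor)
			break;

	return (uint8_t)i;
}

int main(void)
{
	/* 60000 >> 5 = 1875 >= 800: the deepest divider fits. */
	printf("%u\n", sleep_divider_id(60000, MIN_ENGINE_CLK));	/* 5 */
	/* 10000 >> 4 = 625 < 800, 10000 >> 3 = 1250 >= 800. */
	printf("%u\n", sleep_divider_id(10000, MIN_ENGINE_CLK));	/* 3 */
	return 0;
}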
-static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- int i;
-
- if (table && table->count) {
- for (i = table->count - 1; i >= 0; i--) {
- if (pi->high_voltage_t &&
- (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
- pi->high_voltage_t)) {
- *limit = i;
- return 0;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
- if (pi->high_voltage_t &&
- (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
- pi->high_voltage_t)) {
- *limit = i;
- return 0;
- }
- }
- }
-
- *limit = 0;
- return 0;
-}
-
-static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps)
-{
- struct kv_ps *ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 min_sclk = 10000; /* ??? */
- u32 sclk, mclk = 0;
- int i, limit;
- bool force_high;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 stable_p_state_sclk = 0;
- struct amdgpu_clock_and_voltage_limits *max_limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- if (new_rps->vce_active) {
- new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- } else {
- new_rps->evclk = 0;
- new_rps->ecclk = 0;
- }
-
- mclk = max_limits->mclk;
- sclk = min_sclk;
-
- if (pi->caps_stable_p_state) {
- stable_p_state_sclk = (max_limits->sclk * 75) / 100;
-
- for (i = table->count - 1; i >= 0; i--) {
- if (stable_p_state_sclk >= table->entries[i].clk) {
- stable_p_state_sclk = table->entries[i].clk;
- break;
- }
- }
-
- if (i > 0)
- stable_p_state_sclk = table->entries[0].clk;
-
- sclk = stable_p_state_sclk;
- }
-
- if (new_rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- }
-
- ps->need_dfs_bypass = true;
-
- for (i = 0; i < ps->num_levels; i++) {
- if (ps->levels[i].sclk < sclk)
- ps->levels[i].sclk = sclk;
- }
-
- if (table && table->count) {
- for (i = 0; i < ps->num_levels; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
- kv_get_high_voltage_limit(adev, &limit);
- ps->levels[i].sclk = table->entries[limit].clk;
- }
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- for (i = 0; i < ps->num_levels; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
- kv_get_high_voltage_limit(adev, &limit);
- ps->levels[i].sclk = table->entries[limit].sclk_frequency;
- }
- }
- }
-
- if (pi->caps_stable_p_state) {
- for (i = 0; i < ps->num_levels; i++) {
- ps->levels[i].sclk = stable_p_state_sclk;
- }
- }
-
- pi->video_start = new_rps->dclk || new_rps->vclk ||
- new_rps->evclk || new_rps->ecclk;
-
- if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
- pi->battery_state = true;
- else
- pi->battery_state = false;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- ps->dpm0_pg_nb_ps_lo = 0x1;
- ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x1;
- ps->dpmx_nb_ps_hi = 0x0;
- } else {
- ps->dpm0_pg_nb_ps_lo = 0x3;
- ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x3;
- ps->dpmx_nb_ps_hi = 0x0;
-
- if (pi->sys_info.nb_dpm_enable) {
- force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
- pi->disable_nb_ps3_in_battery;
- ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
- ps->dpm0_pg_nb_ps_hi = 0x2;
- ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
- ps->dpmx_nb_ps_hi = 0x2;
- }
- }
-}
-
-static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
- u32 index, bool enable)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
-}
-
-static int kv_calculate_ds_divider(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 sclk_in_sr = 10000; /* ??? */
- u32 i;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].DeepSleepDivId =
- kv_get_sleep_divider_id_from_clock(adev,
- be32_to_cpu(pi->graphics_level[i].SclkFrequency),
- sclk_in_sr);
- }
- return 0;
-}
-
-static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- bool force_high;
- struct amdgpu_clock_and_voltage_limits *max_limits =
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- u32 mclk = max_limits->mclk;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].GnbSlow = 1;
- pi->graphics_level[i].ForceNbPs1 = 0;
- pi->graphics_level[i].UpH = 0;
- }
-
- if (!pi->sys_info.nb_dpm_enable)
- return 0;
-
- force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
-
- if (force_high) {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- pi->graphics_level[i].GnbSlow = 0;
- } else {
- if (pi->battery_state)
- pi->graphics_level[0].ForceNbPs1 = 1;
-
- pi->graphics_level[1].GnbSlow = 0;
- pi->graphics_level[2].GnbSlow = 0;
- pi->graphics_level[3].GnbSlow = 0;
- pi->graphics_level[4].GnbSlow = 0;
- }
- } else {
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
- pi->graphics_level[i].GnbSlow = 1;
- pi->graphics_level[i].ForceNbPs1 = 0;
- pi->graphics_level[i].UpH = 0;
- }
-
- if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
- pi->graphics_level[pi->lowest_valid].UpH = 0x28;
- pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
- if (pi->lowest_valid != pi->highest_valid)
- pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
- }
- }
- return 0;
-}
-
-static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
-
- if (pi->lowest_valid > pi->highest_valid)
- return -EINVAL;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
-
- return 0;
-}
-
-static void kv_init_graphics_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
- struct amdgpu_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-
- if (table && table->count) {
- u32 vid_2bit;
-
- pi->graphics_dpm_level_count = 0;
- for (i = 0; i < table->count; i++) {
- if (pi->high_voltage_t &&
- (pi->high_voltage_t <
- kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
- break;
-
- kv_set_divider_value(adev, i, table->entries[i].clk);
- vid_2bit = kv_convert_vid7_to_vid2(adev,
- &pi->sys_info.vid_mapping_table,
- table->entries[i].v);
- kv_set_vid(adev, i, vid_2bit);
- kv_set_at(adev, i, pi->at[i]);
- kv_dpm_power_level_enabled_for_throttle(adev, i, true);
- pi->graphics_dpm_level_count++;
- }
- } else {
- struct sumo_sclk_voltage_mapping_table *table =
- &pi->sys_info.sclk_voltage_mapping_table;
-
- pi->graphics_dpm_level_count = 0;
- for (i = 0; i < table->num_max_dpm_entries; i++) {
- if (pi->high_voltage_t &&
- pi->high_voltage_t <
- kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
- break;
-
- kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
- kv_set_vid(adev, i, table->entries[i].vid_2bit);
- kv_set_at(adev, i, pi->at[i]);
- kv_dpm_power_level_enabled_for_throttle(adev, i, true);
- pi->graphics_dpm_level_count++;
- }
- }
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
- kv_dpm_power_level_enable(adev, i, false);
-}
-
-static void kv_enable_new_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i;
-
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
- if (i >= pi->lowest_valid && i <= pi->highest_valid)
- kv_dpm_power_level_enable(adev, i, true);
- }
-}
-
-static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
-{
- u32 new_mask = (1 << level);
-
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- new_mask);
-}
-
-static int kv_set_enabled_levels(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 i, new_mask = 0;
-
- for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
- new_mask |= (1 << i);
-
- return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- new_mask);
-}
-
-static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps)
-{
- struct kv_ps *new_ps = kv_get_ps(new_rps);
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 nbdpmconfig1;
-
- if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
- return;
-
- if (pi->sys_info.nb_dpm_enable) {
- nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
- nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
- NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
- NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
- NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
- nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
- (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
- (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
- (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
- WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
- }
-}
-
-static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
- u32 tmp;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
- CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
- ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
-
- return 0;
-}
-
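/*
 * The 49-count bias programmed above pairs with kv_dpm_get_temp() further
 * down: the sensor encodes temperature as (degrees C + 49), in eighths of
 * a degree on the status side.  A minimal sketch of both directions, with
 * the register layout simplified away:
 */
#include <stdio.h>
#include <stdint.h>

/* Threshold field value, as in DIG_THERM_INTH/DIG_THERM_INTL above. */
static uint32_t thresh_field(int millidegrees)
{
	return 49 + millidegrees / 1000;
}

/* Decode a raw sensor word, as in kv_dpm_get_temp() below. */
static int raw_to_millidegrees(uint32_t raw)
{
	return ((int)(raw / 8) - 49) * 1000;
}

int main(void)
{
	printf("%u\n", thresh_field(120000));		/* 169 for 120 C */
	printf("%d\n", raw_to_millidegrees(169 * 8));	/* 120000 */
	return 0;
}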
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
-static int kv_parse_sys_info_table(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- union igp_info *igp_info;
- u8 frev, crev;
- u16 data_offset;
- int i;
-
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
- data_offset);
-
- if (crev != 8) {
- DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
- return -EINVAL;
- }
- pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
- pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
- pi->sys_info.bootup_nb_voltage_index =
- le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
- if (igp_info->info_8.ucHtcTmpLmt == 0)
- pi->sys_info.htc_tmp_lmt = 203;
- else
- pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
- if (igp_info->info_8.ucHtcHystLmt == 0)
- pi->sys_info.htc_hyst_lmt = 5;
- else
- pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
- if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
- DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
- }
-
- if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
- pi->sys_info.nb_dpm_enable = true;
- else
- pi->sys_info.nb_dpm_enable = false;
-
- for (i = 0; i < KV_NUM_NBPSTATES; i++) {
- pi->sys_info.nbp_memory_clock[i] =
- le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
- pi->sys_info.nbp_n_clock[i] =
- le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
- }
- if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
- pi->caps_enable_dfs_bypass = true;
-
- sumo_construct_sclk_voltage_mapping_table(adev,
- &pi->sys_info.sclk_voltage_mapping_table,
- igp_info->info_8.sAvail_SCLK);
-
- sumo_construct_vid_mapping_table(adev,
- &pi->sys_info.vid_mapping_table,
- igp_info->info_8.sAvail_SCLK);
-
- kv_construct_max_power_limits_table(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
- }
- return 0;
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
-};
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static void kv_patch_boot_state(struct amdgpu_device *adev,
- struct kv_ps *ps)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
-
- ps->num_levels = 1;
- ps->levels[0] = pi->boot_pl;
-}
-
-static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- struct kv_ps *ps = kv_get_ps(rps);
-
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- adev->pm.dpm.boot_ps = rps;
- kv_patch_boot_state(adev, ps);
- }
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
-
-static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct kv_power_info *pi = kv_get_pi(adev);
- struct kv_ps *ps = kv_get_ps(rps);
- struct kv_pl *pl = &ps->levels[index];
- u32 sclk;
-
- sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
- sclk |= clock_info->sumo.ucEngineClockHigh << 16;
- pl->sclk = sclk;
- pl->vddc_index = clock_info->sumo.vddcIndex;
-
- ps->num_levels = index + 1;
-
- if (pi->caps_sclk_ds) {
- pl->ds_divider_index = 5;
- pl->ss_divider_index = 5;
- }
-}
-
-static int kv_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct kv_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
- sizeof(struct amdgpu_ps),
- GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- kv_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
- kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
- sclk |= clock_info->sumo.ucEngineClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = 0;
- }
-
- return 0;
-}
-
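/*
 * kv_parse_power_table() above walks a packed, variable-length state
 * array: each _ATOM_PPLIB_STATE_V2 entry is a level-count byte, a
 * non-clock-info index byte, then one clock-info index byte per DPM
 * level, hence the 2 + ucNumDPMLevels stride.  A standalone walk over
 * the same layout (struct and field names simplified):
 */
#include <stdio.h>
#include <stdint.h>

struct state_v2 {			/* models _ATOM_PPLIB_STATE_V2 */
	uint8_t num_dpm_levels;
	uint8_t non_clock_index;
	uint8_t clock_index[];		/* num_dpm_levels entries follow */
};

int main(void)
{
	/* Two packed states: levels {0, 1}, then level {3}. */
	uint8_t blob[] = { 2, 7, 0, 1, 1, 9, 3 };
	uint8_t *p = blob;
	unsigned int i, j;

	for (i = 0; i < 2; i++) {
		struct state_v2 *s = (struct state_v2 *)p;

		printf("state %u: non-clock %u, levels:", i, s->non_clock_index);
		for (j = 0; j < s->num_dpm_levels; j++)
			printf(" %u", s->clock_index[j]);
		printf("\n");

		p += 2 + s->num_dpm_levels;	/* same stride as the driver */
	}
	return 0;
}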
-static int kv_dpm_init(struct amdgpu_device *adev)
-{
- struct kv_power_info *pi;
- int ret, i;
-
- pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
- if (pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = pi;
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret)
- return ret;
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret)
- return ret;
-
- for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
- pi->at[i] = TRINITY_AT_DFLT;
-
- pi->sram_end = SMC_RAM_END;
-
- pi->enable_nb_dpm = true;
-
- pi->caps_power_containment = true;
- pi->caps_cac = true;
- pi->enable_didt = false;
- if (pi->enable_didt) {
- pi->caps_sq_ramping = true;
- pi->caps_db_ramping = true;
- pi->caps_td_ramping = true;
- pi->caps_tcp_ramping = true;
- }
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- pi->caps_sclk_ds = true;
- else
- pi->caps_sclk_ds = false;
-
- pi->enable_auto_thermal_throttling = true;
- pi->disable_nb_ps3_in_battery = false;
- if (amdgpu_bapm == 0)
- pi->bapm_enable = false;
- else
- pi->bapm_enable = true;
- pi->voltage_drop_t = 0;
- pi->caps_sclk_throttle_low_notification = false;
- pi->caps_fps = false; /* true? */
- pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
- pi->caps_uvd_dpm = true;
- pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
- pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
- pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
- pi->caps_stable_p_state = false;
-
- ret = kv_parse_sys_info_table(adev);
- if (ret)
- return ret;
-
- kv_patch_voltage_values(adev);
- kv_construct_boot_state(adev);
-
- ret = kv_parse_power_table(adev);
- if (ret)
- return ret;
-
- pi->enable_dpm = true;
-
- return 0;
-}
-
-static void
-kv_dpm_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- u32 current_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- u32 sclk, tmp;
- u16 vddc;
-
- if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
- seq_printf(m, "invalid dpm profile %d\n", current_index);
- } else {
- sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
- tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
- SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
- vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
- seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
- seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
- seq_printf(m, "power level %d sclk: %u vddc: %u\n",
- current_index, sclk, vddc);
- }
-}
-
-static void
-kv_dpm_print_power_state(void *handle, void *request_ps)
-{
- int i;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct kv_ps *ps = kv_get_ps(rps);
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->num_levels; i++) {
- struct kv_pl *pl = &ps->levels[i];
- printk("\t\tpower level %d sclk: %u vddc: %u\n",
- i, pl->sclk,
- kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static void kv_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- kfree(adev->pm.dpm.ps[i].ps_priv);
- }
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- amdgpu_free_extended_power_table(adev);
-}
-
-static void kv_dpm_display_configuration_changed(void *handle)
-{
-
-}
-
-static u32 kv_dpm_get_sclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->levels[0].sclk;
- else
- return requested_state->levels[requested_state->num_levels - 1].sclk;
-}
-
-static u32 kv_dpm_get_mclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
-
- return pi->sys_info.bootup_uma_clk;
-}
-
-/* get temperature in millidegrees */
-static int kv_dpm_get_temp(void *handle)
-{
- u32 temp;
- int actual_temp = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- temp = RREG32_SMC(0xC0300E0C);
-
- if (temp)
- actual_temp = (temp / 8) - 49;
- else
- actual_temp = 0;
-
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
-}
-
-static int kv_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->powerplay.pp_funcs = &kv_dpm_funcs;
- adev->powerplay.pp_handle = adev;
- kv_dpm_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int kv_dpm_late_init(void *handle)
-{
- /* powerdown unused blocks for now */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- kv_dpm_powergate_acp(adev, true);
- kv_dpm_powergate_samu(adev, true);
-
- return 0;
-}
-
-static int kv_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = kv_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int kv_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static int kv_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm)
- return 0;
-
- mutex_lock(&adev->pm.mutex);
- kv_dpm_setup_asic(adev);
- ret = kv_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- return ret;
-}
-
-static int kv_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- kv_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
-
- return 0;
-}
-
-static int kv_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- kv_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
- }
- return 0;
-}
-
-static int kv_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- kv_dpm_setup_asic(adev);
- ret = kv_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool kv_dpm_is_idle(void *handle)
-{
- return true;
-}
-
-static int kv_dpm_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-
-static int kv_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int kv_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int kv_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
- const struct kv_pl *kv_cpl2)
-{
- return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
- (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
- (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
- (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
-}
-
-static int kv_check_state_equal(void *handle,
- void *current_ps,
- void *request_ps,
- bool *equal)
-{
- struct kv_ps *kv_cps;
- struct kv_ps *kv_rps;
- int i;
- struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- kv_cps = kv_get_ps(cps);
- kv_rps = kv_get_ps(rps);
-
- if (kv_cps == NULL) {
- *equal = false;
- return 0;
- }
-
- if (kv_cps->num_levels != kv_rps->num_levels) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < kv_cps->num_levels; i++) {
- if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
- &(kv_rps->levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
-
-static int kv_dpm_read_sensor(void *handle, int idx,
- void *value, int *size)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct kv_power_info *pi = kv_get_pi(adev);
- uint32_t sclk;
- u32 pl_index =
- (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
- sclk = be32_to_cpu(
- pi->graphics_level[pl_index].SclkFrequency);
- *((uint32_t *)value) = sclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = kv_dpm_get_temp(adev);
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int kv_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
-{
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- kv_dpm_powergate_uvd(handle, gate);
- break;
- case AMD_IP_BLOCK_TYPE_VCE:
- kv_dpm_powergate_vce(handle, gate);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static const struct amd_ip_funcs kv_dpm_ip_funcs = {
- .name = "kv_dpm",
- .early_init = kv_dpm_early_init,
- .late_init = kv_dpm_late_init,
- .sw_init = kv_dpm_sw_init,
- .sw_fini = kv_dpm_sw_fini,
- .hw_init = kv_dpm_hw_init,
- .hw_fini = kv_dpm_hw_fini,
- .suspend = kv_dpm_suspend,
- .resume = kv_dpm_resume,
- .is_idle = kv_dpm_is_idle,
- .wait_for_idle = kv_dpm_wait_for_idle,
- .soft_reset = kv_dpm_soft_reset,
- .set_clockgating_state = kv_dpm_set_clockgating_state,
- .set_powergating_state = kv_dpm_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version kv_smu_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &kv_dpm_ip_funcs,
-};
-
-static const struct amd_pm_funcs kv_dpm_funcs = {
- .pre_set_power_state = &kv_dpm_pre_set_power_state,
- .set_power_state = &kv_dpm_set_power_state,
- .post_set_power_state = &kv_dpm_post_set_power_state,
- .display_configuration_changed = &kv_dpm_display_configuration_changed,
- .get_sclk = &kv_dpm_get_sclk,
- .get_mclk = &kv_dpm_get_mclk,
- .print_power_state = &kv_dpm_print_power_state,
- .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &kv_dpm_force_performance_level,
- .set_powergating_by_smu = kv_set_powergating_by_smu,
- .enable_bapm = &kv_dpm_enable_bapm,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .check_state_equal = kv_check_state_equal,
- .read_sensor = &kv_dpm_read_sensor,
-};
-
-static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
- .set = kv_dpm_set_interrupt_state,
- .process = kv_dpm_process_interrupt,
-};
-
-static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
deleted file mode 100644
index 6df0ed41317c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __KV_DPM_H__
-#define __KV_DPM_H__
-
-#define SMU__NUM_SCLK_DPM_STATE 8
-#define SMU__NUM_MCLK_DPM_LEVELS 4
-#define SMU__NUM_LCLK_DPM_LEVELS 8
-#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
-#include "smu7_fusion.h"
-#include "ppsmc.h"
-
-#define SUMO_MAX_HARDWARE_POWERLEVELS 5
-
-#define SUMO_MAX_NUMBER_VOLTAGES 4
-
-struct sumo_vid_mapping_entry {
- u16 vid_2bit;
- u16 vid_7bit;
-};
-
-struct sumo_vid_mapping_table {
- u32 num_entries;
- struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
-};
-
-struct sumo_sclk_voltage_mapping_entry {
- u32 sclk_frequency;
- u16 vid_2bit;
- u16 rsv;
-};
-
-struct sumo_sclk_voltage_mapping_table {
- u32 num_max_dpm_entries;
- struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
-};
-
-#define TRINITY_AT_DFLT 30
-
-#define KV_NUM_NBPSTATES 4
-
-enum kv_pt_config_reg_type {
- KV_CONFIGREG_MMR = 0,
- KV_CONFIGREG_SMC_IND,
- KV_CONFIGREG_DIDT_IND,
- KV_CONFIGREG_CACHE,
- KV_CONFIGREG_MAX
-};
-
-struct kv_pt_config_reg {
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum kv_pt_config_reg_type type;
-};
-
-struct kv_lcac_config_values {
- u32 block_id;
- u32 signal_id;
- u32 t;
-};
-
-struct kv_lcac_config_reg {
- u32 cntl;
- u32 block_mask;
- u32 block_shift;
- u32 signal_mask;
- u32 signal_shift;
- u32 t_mask;
- u32 t_shift;
- u32 enable_mask;
- u32 enable_shift;
-};
-
-struct kv_pl {
- u32 sclk;
- u8 vddc_index;
- u8 ds_divider_index;
- u8 ss_divider_index;
- u8 allow_gnb_slow;
- u8 force_nbp_state;
- u8 display_wm;
- u8 vce_wm;
-};
-
-struct kv_ps {
- struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
- u32 num_levels;
- bool need_dfs_bypass;
- u8 dpm0_pg_nb_ps_lo;
- u8 dpm0_pg_nb_ps_hi;
- u8 dpmx_nb_ps_lo;
- u8 dpmx_nb_ps_hi;
-};
-
-struct kv_sys_info {
- u32 bootup_uma_clk;
- u32 bootup_sclk;
- u32 dentist_vco_freq;
- u32 nb_dpm_enable;
- u32 nbp_memory_clock[KV_NUM_NBPSTATES];
- u32 nbp_n_clock[KV_NUM_NBPSTATES];
- u16 bootup_nb_voltage_index;
- u8 htc_tmp_lmt;
- u8 htc_hyst_lmt;
- struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
- struct sumo_vid_mapping_table vid_mapping_table;
- u32 uma_channel_number;
-};
-
-struct kv_power_info {
- u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
- u32 voltage_drop_t;
- struct kv_sys_info sys_info;
- struct kv_pl boot_pl;
- bool enable_nb_ps_policy;
- bool disable_nb_ps3_in_battery;
- bool video_start;
- bool battery_state;
- u32 lowest_valid;
- u32 highest_valid;
- u16 high_voltage_t;
- bool cac_enabled;
- bool bapm_enable;
- /* smc offsets */
- u32 sram_end;
- u32 dpm_table_start;
- u32 soft_regs_start;
- /* dpm SMU tables */
- u8 graphics_dpm_level_count;
- u8 uvd_level_count;
- u8 vce_level_count;
- u8 acp_level_count;
- u8 samu_level_count;
- u16 fps_high_t;
- SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
- SMU7_Fusion_ACPILevel acpi_level;
- SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
- SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
- SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
- SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
- u8 uvd_boot_level;
- u8 vce_boot_level;
- u8 acp_boot_level;
- u8 samu_boot_level;
- u8 uvd_interval;
- u8 vce_interval;
- u8 acp_interval;
- u8 samu_interval;
- u8 graphics_boot_level;
- u8 graphics_interval;
- u8 graphics_therm_throttle_enable;
- u8 graphics_voltage_change_enable;
- u8 graphics_clk_slow_enable;
- u8 graphics_clk_slow_divider;
- u8 fps_low_t;
- u32 low_sclk_interrupt_t;
- bool uvd_power_gated;
- bool vce_power_gated;
- bool acp_power_gated;
- bool samu_power_gated;
- bool nb_dpm_enabled;
- /* flags */
- bool enable_didt;
- bool enable_dpm;
- bool enable_auto_thermal_throttling;
- bool enable_nb_dpm;
- /* caps */
- bool caps_cac;
- bool caps_power_containment;
- bool caps_sq_ramping;
- bool caps_db_ramping;
- bool caps_td_ramping;
- bool caps_tcp_ramping;
- bool caps_sclk_throttle_low_notification;
- bool caps_fps;
- bool caps_uvd_dpm;
- bool caps_uvd_pg;
- bool caps_vce_pg;
- bool caps_samu_pg;
- bool caps_acp_pg;
- bool caps_stable_p_state;
- bool caps_enable_dfs_bypass;
- bool caps_sclk_ds;
- struct amdgpu_ps current_rps;
- struct kv_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct kv_ps requested_ps;
-};
-
-/* XXX are these ok? */
-#define KV_TEMP_RANGE_MIN (90 * 1000)
-#define KV_TEMP_RANGE_MAX (120 * 1000)
-
-/* kv_smc.c */
-int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id);
-int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask);
-int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter);
-int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit);
-int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable);
-int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable);
-int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
deleted file mode 100644
index 2d9ab6b8be66..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include "amdgpu.h"
-#include "cikd.h"
-#include "kv_dpm.h"
-
-#include "smu/smu_7_0_0_d.h"
-#include "smu/smu_7_0_0_sh_mask.h"
-
-int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
-{
- u32 i;
- u32 tmp = 0;
-
- WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
-
- if (tmp != 1) {
- if (tmp == 0xFF)
- return -EINVAL;
- else if (tmp == 0xFE)
- return -EINVAL;
- }
-
- return 0;
-}
-
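/*
 * The mailbox handshake above writes the message id, polls SMC_RESP_0,
 * then decodes the response.  Worth noting: a response still reading 0
 * after the timeout falls through to success.  A sketch of just the
 * decode step (0xFF/0xFE meanings are not spelled out in this file, so
 * they are only marked as errors):
 */
#include <stdio.h>

static int decode_smc_resp(unsigned int resp)
{
	if (resp != 1) {
		if (resp == 0xFF || resp == 0xFE)
			return -22;	/* -EINVAL, as in the function above */
	}
	return 0;	/* 1 = acked; 0 (timeout) also lands here */
}

int main(void)
{
	printf("%d %d %d\n", decode_smc_resp(1), decode_smc_resp(0xFF),
	       decode_smc_resp(0));	/* 0 -22 0 */
	return 0;
}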
-int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
-{
- int ret;
-
- ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
-
- if (ret == 0)
- *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);
-
- return ret;
-}
-
-int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return amdgpu_kv_notify_message_to_smu(adev, msg);
-}
-
-static int kv_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
- ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
-
- return 0;
-}
-
-int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit)
-{
- int ret;
-
- ret = kv_set_smc_sram_address(adev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(mmSMC_IND_DATA_0);
- return 0;
-}
-
-int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
- else
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
-}
-
-int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
- else
- return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
-}
-
-int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- int ret;
- u32 data, original_data, addr, extra_shift, t_byte, count, mask;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
- t_byte = addr & 3;
-
- /* RMW for the initial bytes */
- if (t_byte != 0) {
- addr -= t_byte;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- data = 0;
- mask = 0;
- count = 4;
- while (count > 0) {
- if (t_byte > 0) {
- mask = (mask << 8) | 0xff;
- t_byte--;
- } else if (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- mask <<= 8;
- } else {
- data <<= 8;
- mask = (mask << 8) | 0xff;
- }
- count--;
- }
-
- data |= original_data & mask;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- addr += 4;
- }
-
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* SMC address space is BE */
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = kv_set_smc_sram_address(adev, addr, limit);
- if (ret)
- return ret;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
- return 0;
-}
-
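/*
 * amdgpu_kv_copy_bytes_to_smc() above treats SMC SRAM as big-endian
 * 32-bit words behind an index/data register pair, so unaligned head and
 * tail bytes take a read-modify-write.  A byte-at-a-time model of the
 * same end result over a plain word array (the driver batches whole
 * words to cut register traffic and goes through SMC_IND_INDEX_0/DATA_0):
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t sram[4];	/* each word holds 4 bytes, big-endian */

static void copy_to_be_sram(uint32_t start, const uint8_t *src, uint32_t n)
{
	while (n--) {
		uint32_t word = start / 4;
		uint32_t shift = 8 * (3 - (start & 3));	/* BE byte lane */

		sram[word] &= ~(0xffu << shift);	/* read-modify-write */
		sram[word] |= (uint32_t)*src++ << shift;
		start++;
	}
}

int main(void)
{
	const uint8_t payload[] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

	memset(sram, 0x11, sizeof(sram));
	copy_to_be_sram(2, payload, sizeof(payload));	/* unaligned start */
	printf("%08x %08x\n", sram[0], sram[1]);	/* 1111aabb ccddee11 */
	return 0;
}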
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 4b746584a797..1c22d8393b21 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -832,7 +832,6 @@ static int mes_v10_1_queue_init(struct amdgpu_device *adev)
static int mes_v10_1_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int r;
ring = &adev->mes.ring;
@@ -849,11 +848,7 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
- if (r)
- return r;
-
- return 0;
+ return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
}
static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index dffcb93ecee5..f84701c562bf 100755..100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -34,7 +34,7 @@
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
@@ -51,7 +51,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -268,7 +268,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -297,20 +297,19 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
}
}
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable)
{
if (amdgpu_sriov_vf(adev))
return;
if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
}
}
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
/*
@@ -338,7 +337,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -373,7 +372,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
@@ -415,7 +414,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void mmhub_v1_0_init(struct amdgpu_device *adev)
+static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -525,7 +524,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -549,7 +548,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -781,4 +780,13 @@ const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+ .get_fb_location = mmhub_v1_0_get_fb_location,
+ .init = mmhub_v1_0_init,
+ .gart_enable = mmhub_v1_0_gart_enable,
+ .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
+ .gart_disable = mmhub_v1_0_gart_disable,
+ .set_clockgating = mmhub_v1_0_set_clockgating,
+ .get_clockgating = mmhub_v1_0_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
+ .update_power_gating = mmhub_v1_0_update_power_gating,
};
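With every mmhub_v1_0 entry point now static, the only way into this code is the amdgpu_mmhub_funcs table above. A hedged sketch of the dispatch pattern callers are expected to use, assuming the adev->mmhub.funcs pointer that the rest of this series wires up:

/* Illustrative only: generic GMC code now reaches the hub through the
 * ops table instead of calling mmhub_v1_0_* symbols directly, which is
 * what lets one caller path serve several mmhub generations.
 */
static int example_gart_enable(struct amdgpu_device *adev)
{
	if (adev->mmhub.funcs && adev->mmhub.funcs->gart_enable)
		return adev->mmhub.funcs->gart_enable(adev);

	return -ENOENT;
}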
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index c43319e8f945..d77f5b65a618 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -25,18 +25,4 @@
extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v1_0_init(struct amdgpu_device *adev);
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
- bool enable);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 757fa8e83f5b..2063700f0bc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -36,7 +36,130 @@
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static const char *mmhub_client_ids_navi1x[][2] = {
+ [3][0] = "DCEDMC",
+ [4][0] = "DCEVGA",
+ [5][0] = "MP0",
+ [6][0] = "MP1",
+ [13][0] = "VMC",
+ [14][0] = "HDP",
+ [15][0] = "OSS",
+ [16][0] = "VCNU",
+ [17][0] = "JPEG",
+ [18][0] = "VCN",
+ [3][1] = "DCEDMC",
+ [4][1] = "DCEXFC",
+ [5][1] = "DCEVGA",
+ [6][1] = "DCEDWB",
+ [7][1] = "MP0",
+ [8][1] = "MP1",
+ [9][1] = "DBGU1",
+ [10][1] = "DBGU0",
+ [11][1] = "XDP",
+ [14][1] = "HDP",
+ [15][1] = "OSS",
+ [16][1] = "VCNU",
+ [17][1] = "JPEG",
+ [18][1] = "VCN",
+};
+
+static const char *mmhub_client_ids_sienna_cichlid[][2] = {
+ [3][0] = "DCEDMC",
+ [4][0] = "DCEVGA",
+ [5][0] = "MP0",
+ [6][0] = "MP1",
+ [8][0] = "VMC",
+ [9][0] = "VCNU0",
+ [10][0] = "JPEG",
+ [12][0] = "VCNU1",
+ [13][0] = "VCN1",
+ [14][0] = "HDP",
+ [15][0] = "OSS",
+ [32+11][0] = "VCN0",
+ [0][1] = "DBGU0",
+ [1][1] = "DBGU1",
+ [2][1] = "DCEDWB",
+ [3][1] = "DCEDMC",
+ [4][1] = "DCEVGA",
+ [5][1] = "MP0",
+ [6][1] = "MP1",
+ [7][1] = "XDP",
+ [9][1] = "VCNU0",
+ [10][1] = "JPEG",
+ [11][1] = "VCN0",
+ [12][1] = "VCNU1",
+ [13][1] = "VCN1",
+ [14][1] = "HDP",
+ [15][1] = "OSS",
+};
+
+static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+	/* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ uint32_t cid, rw;
+ const char *mmhub_cid = NULL;
+
+ cid = REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, CID);
+ rw = REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, RW);
+
+ dev_err(adev->dev,
+ "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
+ break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
+ break;
+ default:
+ mmhub_cid = NULL;
+ break;
+ }
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ mmhub_cid ? mmhub_cid : "unknown", cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%x\n", rw);
+}
+
+static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -78,11 +201,6 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
@@ -134,6 +252,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -189,6 +313,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(MMHUB, 0,
mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
@@ -239,7 +369,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -268,7 +398,7 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
}
}
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
@@ -284,7 +414,7 @@ int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -315,9 +445,16 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -351,7 +488,12 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void mmhub_v2_0_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
+ .print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status,
+ .get_invalidate_req = mmhub_v2_0_get_invalidate_req,
+};
+
+static void mmhub_v2_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -381,6 +523,16 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
mmMMVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs;
}
static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -471,7 +623,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
}
}
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -495,7 +647,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -528,3 +680,14 @@ void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
+
+const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .init = mmhub_v2_0_init,
+ .gart_enable = mmhub_v2_0_gart_enable,
+ .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
+ .gart_disable = mmhub_v2_0_gart_disable,
+ .set_clockgating = mmhub_v2_0_set_clockgating,
+ .get_clockgating = mmhub_v2_0_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
+};
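The client-ID tables above are indexed by the CID and RW fields of MMVM_L2_PROTECTION_FAULT_STATUS. A minimal sketch of the decode; the bit positions below are assumptions for illustration only, since the driver extracts the fields with REG_GET_FIELD() and the generated register masks:

#include <stddef.h>
#include <stdint.h>

#define SKETCH_CID(status)	(((status) >> 9) & 0x1ff)	/* assumed layout */
#define SKETCH_RW(status)	(((status) >> 18) & 0x1)	/* assumed layout */

/* Map a raw fault status word to a client name via a [cid][rw] table;
 * holes in the designated-initializer tables read back as NULL.
 */
static const char *decode_client(const char *ids[][2], size_t n,
				 uint32_t status)
{
	uint32_t cid = SKETCH_CID(status);
	uint32_t rw = SKETCH_RW(status);

	if (cid >= n || !ids[cid][rw])
		return "unknown";

	return ids[cid][rw];
}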
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index 3ea4344f0315..f80f461d67da 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -23,15 +23,6 @@
#ifndef __MMHUB_V2_0_H__
#define __MMHUB_V2_0_H__
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v2_0_init(struct amdgpu_device *adev);
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
+extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 9979f54fef57..66748bb01b52 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -36,7 +36,7 @@
#define MMHUB_NUM_INSTANCES 2
#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
 /* The base should be the same between the two MMHUBs on Arcturus. Read one here. */
u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
@@ -97,7 +97,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
(u32)(adev->gmc.gart_end >> 44));
}
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
int i;
@@ -330,7 +330,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_distance, tmp);
@@ -375,7 +375,7 @@ static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
}
}
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
int i;
@@ -397,7 +397,7 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -442,7 +442,7 @@ void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
int i;
@@ -500,7 +500,7 @@ void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
}
}
-void mmhub_v9_4_init(struct amdgpu_device *adev)
+static void mmhub_v9_4_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
{&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
@@ -630,7 +630,7 @@ static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *ade
}
}
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -650,7 +650,7 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -1624,8 +1624,45 @@ static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
}
}
+static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
+ if (reg_value)
+ dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+ i, reg_value);
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+ .get_fb_location = mmhub_v9_4_get_fb_location,
+ .init = mmhub_v9_4_init,
+ .gart_enable = mmhub_v9_4_gart_enable,
+ .set_fault_enable_default = mmhub_v9_4_set_fault_enable_default,
+ .gart_disable = mmhub_v9_4_gart_disable,
+ .set_clockgating = mmhub_v9_4_set_clockgating,
+ .get_clockgating = mmhub_v9_4_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
+ .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 1b979773776c..92404a8f66f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -25,16 +25,4 @@
extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v9_4_init(struct amdgpu_device *adev);
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 5fd67e1cc2a0..f5ce9a9f4cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -238,19 +238,15 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
- int locked;
 /* block amdgpu_gpu_recover until the FLR COMPLETE message is
 * received, otherwise the mailbox msg will be ruined/reset
 * by the VF FLR.
- *
- * we can unlock the lock_reset to allow "amdgpu_job_timedout"
- * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
- * which means host side had finished this VF's FLR.
*/
- locked = mutex_trylock(&adev->lock_reset);
- if (locked)
- adev->in_gpu_reset = true;
+ if (!down_read_trylock(&adev->reset_sem))
+ return;
+
+ atomic_set(&adev->in_gpu_reset, 1);
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -261,14 +257,13 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- if (locked) {
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
- }
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_read(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
- && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
+ && (!amdgpu_device_has_job_running(adev) ||
+ adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
amdgpu_device_gpu_recover(adev, NULL);
}
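The switch from mutex_trylock() plus a local flag to down_read_trylock() on reset_sem changes the contract: the FLR worker now bails out entirely when a full GPU reset already holds the semaphore, instead of polling the mailbox unlocked. A userspace analogue of that pattern, as a sketch:

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int in_gpu_reset;

/* FLR handlers take the lock as readers, so several can run while no
 * writer (a full reset) is active; if a reset is in flight they skip
 * this FLR rather than block a worker thread on it.
 */
static void flr_work_sketch(void)
{
	if (pthread_rwlock_tryrdlock(&reset_sem) != 0)
		return;			/* reset in progress: skip */

	atomic_store(&in_gpu_reset, 1);
	/* ... poll the mailbox for FLR_NOTIFICATION_CMPL ... */
	atomic_store(&in_gpu_reset, 0);
	pthread_rwlock_unlock(&reset_sem);
}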
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index ce2bf1fb79ed..666ed99cc14b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -259,19 +259,15 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
- int locked;
 /* block amdgpu_gpu_recover until the FLR COMPLETE message is
 * received, otherwise the mailbox msg will be ruined/reset
 * by the VF FLR.
- *
- * we can unlock the lock_reset to allow "amdgpu_job_timedout"
- * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
- * which means host side had finished this VF's FLR.
*/
- locked = mutex_trylock(&adev->lock_reset);
- if (locked)
- adev->in_gpu_reset = true;
+ if (!down_read_trylock(&adev->reset_sem))
+ return;
+
+ atomic_set(&adev->in_gpu_reset, 1);
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -282,14 +278,13 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- if (locked) {
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
- }
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_read(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
- && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+ && (!amdgpu_device_has_job_running(adev) ||
+ adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 350f1bf063c6..74b1e7dc49a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -306,7 +306,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
} else {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
}
- navi10_ih_reroute_ih(adev);
+ if (adev->irq.ih1.ring_size)
+ navi10_ih_reroute_ih(adev);
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih->use_bus_addr) {
@@ -668,19 +669,26 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
+ adev->irq.ih1.ring_size = 0;
+ adev->irq.ih2.ring_size = 0;
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ if (adev->asic_type < CHIP_NAVI10) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index =
+ (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index =
+ (adev->doorbell_index.ih + 2) << 1;
+ }
r = amdgpu_irq_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index e629156173d3..eadc9526d33f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -302,6 +302,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -312,28 +313,31 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
RAS_CNTLR_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
- /*
- * clear error status after ras_controller_intr according to
- * hw team and count ue number for query
- */
- nbio_v7_4_query_ras_error_count(adev, &err_data);
-
- /* logging on error counter and printing for awareness */
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
-
- if (err_data.ce_count)
- dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
- obj->err_data.ce_count,
- adev->nbio.ras_if->name);
-
- if (err_data.ue_count)
- dev_info(adev->dev, "%ld uncorrectable hardware "
- "errors detected in %s block\n",
- obj->err_data.ue_count,
- adev->nbio.ras_if->name);
+ if (!ras->disable_ras_err_cnt_harvest) {
+ /*
+	 * clear the error status after ras_controller_intr,
+	 * per the hw team, and count the UE number for
+	 * later query
+ */
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+		/* log the error counts and print them for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ adev->nbio.ras_if->name);
+ }
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index da8024c2826e..8eeba8096493 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -69,75 +69,40 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
*/
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u32 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, v);
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u64 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* read low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
-
- /* read high 32 bit*/
- WREG32(address, reg + 4);
- (void)RREG32(address);
- r |= ((u64)RREG32(data) << 32);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* write low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, (u32)(v & 0xffffffffULL));
- (void)RREG32(data);
-
- /* write high 32 bit */
- WREG32(address, reg + 4);
- (void)RREG32(address);
- WREG32(data, (u32)(v >> 32));
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
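The open-coded index/data sequences above collapse into the common amdgpu_device_indirect_* helpers. A sketch of what such a helper presumably does, mirroring the deleted sequence, posting read included:

/* Indirect read through an index/data register pair: write the target
 * offset to the index port, read it back to post the write, then read
 * the data port, all under the PCIe index lock.
 */
static u32 indirect_rreg_sketch(struct amdgpu_device *adev,
				u32 index_port, u32 data_port, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(index_port, reg);
	(void)RREG32(index_port);	/* posting read */
	r = RREG32(data_port);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}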
@@ -311,7 +276,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
- pci_save_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
@@ -323,7 +288,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");
- pci_restore_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -364,6 +329,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))
@@ -379,7 +345,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- dev_info(adev->dev, "GPU BACO reset\n");
+ dev_info(adev->dev, "BACO reset\n");
ret = smu_baco_enter(smu);
if (ret)
@@ -387,8 +353,10 @@ static int nv_asic_reset(struct amdgpu_device *adev)
ret = smu_baco_exit(smu);
if (ret)
return ret;
- } else
+ } else {
+ dev_info(adev->dev, "MODE1 reset\n");
ret = nv_asic_mode1_reset(adev);
+ }
return ret;
}
@@ -487,6 +455,15 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+ if ((pdev->device == 0x731E &&
+ (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+ (pdev->device == 0x7340 && pdev->revision == 0xC9))
+ return true;
+ return false;
+}
+
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -523,7 +500,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!nv_is_headless_sku(adev->pdev))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -618,7 +596,7 @@ static void nv_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
@@ -689,6 +667,10 @@ static void nv_init_doorbell_index(struct amdgpu_device *adev)
adev->doorbell_index.sdma_doorbell_range = 20;
}
+static void nv_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
.read_disabled_bios = &nv_read_disabled_bios,
@@ -708,6 +690,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
.supports_baco = &nv_asic_supports_baco,
+ .pre_asic_init = &nv_pre_asic_init,
};
static int nv_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
deleted file mode 100644
index 8463245f424f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef PP_SMC_H
-#define PP_SMC_H
-
-#pragma pack(push, 1)
-
-#define PPSMC_SWSTATE_FLAG_DC 0x01
-#define PPSMC_SWSTATE_FLAG_UVD 0x02
-#define PPSMC_SWSTATE_FLAG_VCE 0x04
-#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
-
-#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
-#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
-#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
-
-#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
-#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
-#define PPSMC_SYSTEMFLAG_GDDR5 0x04
-#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40
-
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
-#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
-#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02
-
-#define PPSMC_DISPLAY_WATERMARK_LOW 0
-#define PPSMC_DISPLAY_WATERMARK_HIGH 1
-
-#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
-#define PPSMC_STATEFLAG_POWERBOOST 0x02
-#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
-#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
-
-#define FDO_MODE_HARDWARE 0
-#define FDO_MODE_PIECE_WISE_LINEAR 1
-
-enum FAN_CONTROL {
- FAN_CONTROL_FUZZY,
- FAN_CONTROL_TABLE
-};
-
-#define PPSMC_Result_OK ((uint8_t)0x01)
-#define PPSMC_Result_Failed ((uint8_t)0xFF)
-
-typedef uint8_t PPSMC_Result;
-
-#define PPSMC_MSG_Halt ((uint8_t)0x10)
-#define PPSMC_MSG_Resume ((uint8_t)0x11)
-#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13)
-#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14)
-#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15)
-#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16)
-#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17)
-#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20)
-#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40)
-#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41)
-#define PPSMC_MSG_ForceHigh ((uint8_t)0x42)
-#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43)
-#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51)
-#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52)
-#define PPSMC_MSG_EnableCac ((uint8_t)0x53)
-#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
-#define PPSMC_TDPClampingActive ((uint8_t)0x59)
-#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
-#define PPSMC_StartFanControl ((uint8_t)0x5B)
-#define PPSMC_StopFanControl ((uint8_t)0x5C)
-#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
-#define PPSMC_NoDisplay ((uint8_t)0x5D)
-#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
-#define PPSMC_HasDisplay ((uint8_t)0x5E)
-#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
-#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
-#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
-#define PPSMC_MSG_DisableULV ((uint8_t)0x63)
-#define PPSMC_MSG_EnterULV ((uint8_t)0x64)
-#define PPSMC_MSG_ExitULV ((uint8_t)0x65)
-#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E)
-#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F)
-#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A)
-#define PPSMC_FlushDataCache ((uint8_t)0x80)
-#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82)
-#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83)
-#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84)
-#define PPSMC_MSG_EnableDTE ((uint8_t)0x87)
-#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
-#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
-#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
-#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
-
-/* CI/KV/KB */
-#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
-#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
-#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
-#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
-#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
-#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
-#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
-#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
-#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
-#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
-#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
-#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
-#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
-#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
-#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
-#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
-#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
-#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
-#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
-#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
-#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
-#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
-#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
-#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
-#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
-#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
-#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
-#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
-#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
-#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
-#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
-#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
-#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
-#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
-#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
-#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
-#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
-#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
-#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
-#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
-#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
-#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
-#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
-#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
-#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
-#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
-#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
-#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
-#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
-#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
-#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
-#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
-
-#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
-#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
-
-#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
-#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
-
-/* TN */
-#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
-#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
-#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
-#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
-#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
-#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
-#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
-#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
-#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
-#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
-#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
-#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
-#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
-
-#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
-#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
-#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
-#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
-#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
-
-typedef uint16_t PPSMC_Msg;
-
-#pragma pack(pop)
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index cbc04a5c0fe1..4137dc710aaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -83,19 +83,6 @@ struct psp_gfx_ctrl
*/
#define GFX_FLAG_RESPONSE 0x80000000
-/* Gbr IH registers ID */
-enum ih_reg_id {
- IH_RB = 0, // IH_RB_CNTL
- IH_RB_RNG1 = 1, // IH_RB_CNTL_RING1
- IH_RB_RNG2 = 2, // IH_RB_CNTL_RING2
-};
-
-/* Command to setup Gibraltar IH register */
-struct psp_gfx_cmd_gbr_ih_reg {
- uint32_t reg_value; /* Value to be set to the IH_RB_CNTL... register*/
- enum ih_reg_id reg_id; /* ID of the register */
-};
-
/* TEE Gfx Command IDs for the ring buffer interface. */
enum psp_gfx_cmd_id
{
@@ -214,7 +201,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_UVD1 = 23, /* UVD1 VG-20 */
GFX_FW_TYPE_TOC = 24, /* TOC NV-10 */
GFX_FW_TYPE_RLC_P = 25, /* RLC P NV */
- GFX_FW_TYPE_RLX6 = 26, /* RLX6 NV */
+ GFX_FW_TYPE_RLC_IRAM = 26, /* RLC_IRAM NV */
GFX_FW_TYPE_GLOBAL_TAP_DELAYS = 27, /* GLOBAL TAP DELAYS NV */
GFX_FW_TYPE_SE0_TAP_DELAYS = 28, /* SE0 TAP DELAYS NV */
GFX_FW_TYPE_SE1_TAP_DELAYS = 29, /* SE1 TAP DELAYS NV */
@@ -236,7 +223,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_ACCUM_CTRL_RAM = 45, /* ACCUM CTRL RAM NV */
GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */
GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */
- GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */
+ GFX_FW_TYPE_RLC_DRAM_BOOT = 48, /* RLC DRAM BOOT NV */
GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */
GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */
GFX_FW_TYPE_DMUB = 51, /* DMUB RN */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index d488d250805d..6c5d9612abcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
break;
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
err = psp_init_ta_microcode(&adev->psp, chip_name);
if (err)
return err;
break;
- case CHIP_NAVY_FLOUNDER:
- break;
default:
BUG();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 6c9614f77d33..c4828bd3264b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -38,6 +38,10 @@
#include "oss/osssys_4_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
+MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
+
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -45,17 +49,72 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
+ char fw_name[30];
int err = 0;
+ const struct ta_firmware_header_v1_0 *ta_hdr;
+ DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
}
err = psp_init_asd_microcode(psp, chip_name);
+ if (err)
+ goto out;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v12.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
+ return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "psp v12.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ }
+
return err;
}
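The new TA path above follows the usual request/validate/parse shape, with the twist that a missing TA image is non-fatal (the function still returns 0) while a corrupt one is an error. A condensed sketch of that error handling (hypothetical name load_ta_sketch):

/* Request and validate the TA firmware; absence is a soft condition,
 * corruption is not. Either failure leaves adev->psp.ta_fw NULL so
 * later code can test the pointer.
 */
static int load_ta_sketch(struct amdgpu_device *adev, const char *chip)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip);
	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
	if (err) {
		adev->psp.ta_fw = NULL;	/* optional firmware: carry on */
		return 0;
	}

	err = amdgpu_ucode_validate(adev->psp.ta_fw);
	if (err) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
		return err;
	}

	/* ... pull HDCP/DTM offsets and sizes out of the TA header ... */
	return 0;
}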
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
deleted file mode 100644
index 055321f61ca7..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __R600_DPM_H__
-#define __R600_DPM_H__
-
-#define R600_ASI_DFLT 10000
-#define R600_BSP_DFLT 0x41EB
-#define R600_BSU_DFLT 0x2
-#define R600_AH_DFLT 5
-#define R600_RLP_DFLT 25
-#define R600_RMP_DFLT 65
-#define R600_LHP_DFLT 40
-#define R600_LMP_DFLT 15
-#define R600_TD_DFLT 0
-#define R600_UTC_DFLT_00 0x24
-#define R600_UTC_DFLT_01 0x22
-#define R600_UTC_DFLT_02 0x22
-#define R600_UTC_DFLT_03 0x22
-#define R600_UTC_DFLT_04 0x22
-#define R600_UTC_DFLT_05 0x22
-#define R600_UTC_DFLT_06 0x22
-#define R600_UTC_DFLT_07 0x22
-#define R600_UTC_DFLT_08 0x22
-#define R600_UTC_DFLT_09 0x22
-#define R600_UTC_DFLT_10 0x22
-#define R600_UTC_DFLT_11 0x22
-#define R600_UTC_DFLT_12 0x22
-#define R600_UTC_DFLT_13 0x22
-#define R600_UTC_DFLT_14 0x22
-#define R600_DTC_DFLT_00 0x24
-#define R600_DTC_DFLT_01 0x22
-#define R600_DTC_DFLT_02 0x22
-#define R600_DTC_DFLT_03 0x22
-#define R600_DTC_DFLT_04 0x22
-#define R600_DTC_DFLT_05 0x22
-#define R600_DTC_DFLT_06 0x22
-#define R600_DTC_DFLT_07 0x22
-#define R600_DTC_DFLT_08 0x22
-#define R600_DTC_DFLT_09 0x22
-#define R600_DTC_DFLT_10 0x22
-#define R600_DTC_DFLT_11 0x22
-#define R600_DTC_DFLT_12 0x22
-#define R600_DTC_DFLT_13 0x22
-#define R600_DTC_DFLT_14 0x22
-#define R600_VRC_DFLT 0x0000C003
-#define R600_VOLTAGERESPONSETIME_DFLT 1000
-#define R600_BACKBIASRESPONSETIME_DFLT 1000
-#define R600_VRU_DFLT 0x3
-#define R600_SPLLSTEPTIME_DFLT 0x1000
-#define R600_SPLLSTEPUNIT_DFLT 0x3
-#define R600_TPU_DFLT 0
-#define R600_TPC_DFLT 0x200
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-#define R600_GICST_DFLT 0x200
-#define R600_FCT_DFLT 0x0400
-#define R600_FCTU_DFLT 0
-#define R600_CTXCGTT3DRPHC_DFLT 0x20
-#define R600_CTXCGTT3DRSDC_DFLT 0x40
-#define R600_VDDC3DOORPHC_DFLT 0x100
-#define R600_VDDC3DOORSDC_DFLT 0x7
-#define R600_VDDC3DOORSU_DFLT 0
-#define R600_MPLLLOCKTIME_DFLT 100
-#define R600_MPLLRESETTIME_DFLT 150
-#define R600_VCOSTEPPCT_DFLT 20
-#define R600_ENDINGVCOSTEPPCT_DFLT 5
-#define R600_REFERENCEDIVIDER_DFLT 4
-
-#define R600_PM_NUMBER_OF_TC 15
-#define R600_PM_NUMBER_OF_SCLKS 20
-#define R600_PM_NUMBER_OF_MCLKS 4
-#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
-#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum r600_power_level {
- R600_POWER_LEVEL_LOW = 0,
- R600_POWER_LEVEL_MEDIUM = 1,
- R600_POWER_LEVEL_HIGH = 2,
- R600_POWER_LEVEL_CTXSW = 3,
-};
-
-enum r600_td {
- R600_TD_AUTO,
- R600_TD_UP,
- R600_TD_DOWN,
-};
-
-enum r600_display_watermark {
- R600_DISPLAY_WATERMARK_LOW = 0,
- R600_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum r600_display_gap
-{
- R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- R600_PM_DISPLAY_GAP_VBLANK = 1,
- R600_PM_DISPLAY_GAP_WATERMARK = 2,
- R600_PM_DISPLAY_GAP_IGNORE = 3,
-};
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 856c50386c86..e82f49f62f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -69,6 +69,7 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -592,6 +593,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -616,7 +620,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
@@ -1000,7 +1007,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
sdma[i] = &adev->sdma.instance[i].page;
if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (unset == false)) {
+ (!unset)) {
amdgpu_ttm_set_buffer_funcs_status(adev, false);
unset = true;
}
@@ -1063,6 +1070,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
+
+ /*
+		 * Enable SDMA utilization. It is only supported
+		 * on Arcturus for the moment, and only with
+		 * firmware version 14 and above.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ adev->sdma.instance[i].fw_version >= 14)
+ WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
}
}
@@ -1080,7 +1096,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v4_0_gfx_stop(adev);
sdma_v4_0_rlc_stop(adev);
if (adev->sdma.has_page_queue)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index e2232dd12d8e..9c72b95b7463 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -203,6 +203,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -616,7 +619,7 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v5_0_gfx_stop(adev);
sdma_v5_0_rlc_stop(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 46a9617fee5f..9f3952723c63 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -148,6 +148,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -559,7 +562,7 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v5_2_gfx_stop(adev);
sdma_v5_2_rlc_stop(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 1b449291f068..e5e336fd9e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -52,6 +52,8 @@
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "amdgpu_dm.h"
+
static const u32 tahiti_golden_registers[] =
{
mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
@@ -1215,10 +1217,100 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-//xxx: not implemented
+static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
+{
+ u32 tmp, i;
+
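+ /* put the SPLL into bypass so SCLK runs from the reference clock */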
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_BYPASS_EN;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp |= SPLL_CTLREQ_CHG;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
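+ /* take MCLK off the MPLL as well */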
+ tmp = RREG32(MPLL_CNTL_MODE);
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
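+ /* take software control of the SPLL, then reset and sleep it */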
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp |= SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_RESET;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_SLEEP;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp &= ~SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int r = -EINVAL;
+
+ dev_info(adev->dev, "GPU pci config reset\n");
+
+ /* set mclk/sclk to bypass */
+ si_set_clk_bypass_mode(adev);
+ /* powerdown spll */
+ si_spll_powerdown(adev);
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ /* reset */
+ amdgpu_device_pci_config_reset(adev);
+
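+ /* give the reset a moment to settle before polling */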
+ udelay(100);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ adev->has_hw_reset = true;
+ r = 0;
+ break;
+ }
+ udelay(1);
+ }
+
+ return r;
+}
+
static int si_asic_reset(struct amdgpu_device *adev)
{
- return 0;
+ int r;
+
+ dev_info(adev->dev, "PCI CONFIG reset\n");
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ r = si_gpu_pci_config_reset(adev);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return r;
}
static bool si_asic_supports_baco(struct amdgpu_device *adev)
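
Aside, not part of the patch: while the ASIC is held in reset its register
reads come back as all-ones, so the loop above treats
mmCONFIG_MEMSIZE != 0xffffffff as "the chip is responding again". The same
polling idiom in isolation (wait_for_asic is a hypothetical helper):

	static int wait_for_asic(struct amdgpu_device *adev)
	{
		u32 i;

		for (i = 0; i < adev->usec_timeout; i++) {
			/* a sane readback means register access is restored */
			if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}
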
@@ -1247,7 +1339,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
uint32_t temp;
temp = RREG32(CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
@@ -1779,6 +1871,10 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
+static void si_pre_asic_init(struct amdgpu_device *adev)
+{
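+ /* nothing to do on SI before ASIC init; stub for the new pre_asic_init hook */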
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1800,6 +1896,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.need_reset_on_init = &si_need_reset_on_init,
.get_pcie_replay_count = &si_get_pcie_replay_count,
.supports_baco = &si_asic_supports_baco,
+ .pre_asic_init = &si_pre_asic_init,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -2546,6 +2643,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
@@ -2560,6 +2661,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
deleted file mode 100644
index ea914b256ebd..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ /dev/null
@@ -1,8079 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_dpm.h"
-#include "amdgpu_atombios.h"
-#include "amd_pcie.h"
-#include "sid.h"
-#include "r600_dpm.h"
-#include "si_dpm.h"
-#include "atom.h"
-#include "../include/pptable.h"
-#include <linux/math64.h>
-#include <linux/seq_file.h>
-#include <linux/firmware.h>
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define SMC_RAM_END 0x20000
-
-#define SCLK_MIN_DEEPSLEEP_FREQ 1350
-
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-
-#define BIOS_SCRATCH_4 0x5cd
-
-MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
-MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
-MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/verde_smc.bin");
-MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/oland_smc.bin");
-MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
-MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
-
-static const struct amd_pm_funcs si_dpm_funcs;
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_SI_CLOCK_INFO si;
-};
-
-static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
-{
- R600_UTC_DFLT_00,
- R600_UTC_DFLT_01,
- R600_UTC_DFLT_02,
- R600_UTC_DFLT_03,
- R600_UTC_DFLT_04,
- R600_UTC_DFLT_05,
- R600_UTC_DFLT_06,
- R600_UTC_DFLT_07,
- R600_UTC_DFLT_08,
- R600_UTC_DFLT_09,
- R600_UTC_DFLT_10,
- R600_UTC_DFLT_11,
- R600_UTC_DFLT_12,
- R600_UTC_DFLT_13,
- R600_UTC_DFLT_14,
-};
-
-static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
-{
- R600_DTC_DFLT_00,
- R600_DTC_DFLT_01,
- R600_DTC_DFLT_02,
- R600_DTC_DFLT_03,
- R600_DTC_DFLT_04,
- R600_DTC_DFLT_05,
- R600_DTC_DFLT_06,
- R600_DTC_DFLT_07,
- R600_DTC_DFLT_08,
- R600_DTC_DFLT_09,
- R600_DTC_DFLT_10,
- R600_DTC_DFLT_11,
- R600_DTC_DFLT_12,
- R600_DTC_DFLT_13,
- R600_DTC_DFLT_14,
-};
-
-static const struct si_cac_config_reg cac_weights_tahiti[] =
-{
- { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_tahiti[] =
-{
- { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-
-};
-
-static const struct si_cac_config_reg cac_override_tahiti[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_tahiti =
-{
- ((1 << 16) | 27027),
- 6,
- 0,
- 4,
- 95,
- {
- 0UL,
- 0UL,
- 4521550UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 40
- },
- 595000000UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_tahiti =
-{
- { 1159409, 0, 0, 0, 0 },
- { 777, 0, 0, 0, 0 },
- 2,
- 54000,
- 127000,
- 25,
- 2,
- 10,
- 13,
- { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
- { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
- { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
- 85,
- false
-};
-
-static const struct si_dte_data dte_data_tahiti_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_new_zealand =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
- { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
- 0x5,
- 0xAFC8,
- 0x69,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
- 85,
- true
-};
-
-static const struct si_dte_data dte_data_aruba_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_malta =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_pitcairn[] =
-{
- { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_pitcairn[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_pitcairn[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_pitcairn =
-{
- ((1 << 16) | 27027),
- 5,
- 0,
- 6,
- 100,
- {
- 51600000UL,
- 1800000UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_pitcairn =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_curacao_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_curacao_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_neptune_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 45000,
- 100,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_heathrow[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_cape_verde[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_cape_verde[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_cape_verde[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_cape_verde =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_cape_verde =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_venus_xtx =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_venus_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_venus_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
- 5,
- 55000,
- 0x69,
- 0xA,
- 1,
- 0,
- 0x3,
- { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_oland[] =
-{
- { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_mars_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_mars_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_oland_pro[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_weights_oland_xt[] =
-{
- { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_oland[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg lcac_mars_pro[] =
-{
- { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
- { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
- { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_cac_config_reg cac_override_oland[] =
-{
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_oland =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_powertune_data powertune_data_mars_pro =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 7,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static const struct si_dte_data dte_data_oland =
-{
- { 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0 },
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
- 0,
- false
-};
-
-static const struct si_dte_data dte_data_mars_pro =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 55000,
- 105,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_dte_data dte_data_sun_xt =
-{
- { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
- { 0x0, 0x0, 0x0, 0x0, 0x0 },
- 5,
- 55000,
- 105,
- 0xA,
- 1,
- 0,
- 0x10,
- { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
- { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
- { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
- 90,
- true
-};
-
-static const struct si_cac_config_reg cac_weights_hainan[] =
-{
- { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
- { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
- { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
- { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
- { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
- { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
- { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
- { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
- { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
- { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
- { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
- { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
- { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
- { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
- { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
- { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
- { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
- { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
- { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
- { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
- { 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_hainan =
-{
- ((1 << 16) | 0x6993),
- 5,
- 0,
- 9,
- 105,
- {
- 0UL,
- 0UL,
- 7194395UL,
- 309631529UL,
- -1270850L,
- 4513710L,
- 100
- },
- 117830498UL,
- 12,
- {
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0
- },
- true
-};
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
-static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
-static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
-
-static int si_populate_voltage_value(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
-static int si_get_std_voltage_value(struct amdgpu_device *adev,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage,
- u16 *std_voltage);
-static int si_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value);
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk);
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
-
-static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
-{
- struct si_power_info *pi = adev->pm.dpm.priv;
- return pi;
-}
-
-static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
- u16 v, s32 t, u32 ileakage, u32 *leakage)
-{
- s64 kt, kv, leakage_w, i_leakage, vddc;
- s64 temperature, t_slope, t_intercept, av, bv, t_ref;
- s64 tmp;
-
- i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
- vddc = div64_s64(drm_int2fixp(v), 1000);
- temperature = div64_s64(drm_int2fixp(t), 1000);
-
- t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
- t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
- av = div64_s64(drm_int2fixp(coeff->av), 100000000);
- bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
- t_ref = drm_int2fixp(coeff->t_ref);
-
- tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
- kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
- kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
- kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
-
- leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
- *leakage = drm_fixp2int(leakage_w * 1000);
-}
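
Note: stripped of the drm_fixp fixed-point plumbing, the routine above evaluates a conventional exponential leakage model. With the inputs rescaled as in the code (coefficients divided by 1e8, voltage converted from mV to V, temperature from millidegrees to degrees, ileakage divided by 100), the computation is:

    k_t    = exp((t_slope*V + t_intercept) * T) / exp((t_slope*V + t_intercept) * T_ref)
    k_v    = a_v * exp(b_v * V)
    P_leak = I_leak * k_t * k_v * V

and *leakage is returned as drm_fixp2int(P_leak * 1000), i.e. scaled up by 1000 from the working units. si_calculate_leakage_for_v_formula() below is the same model with the entire k_t term replaced by a precomputed constant, fixed_kt / 1e8.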
-
-static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
- const struct ni_leakage_coeffients *coeff,
- u16 v,
- s32 t,
- u32 i_leakage,
- u32 *leakage)
-{
- si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
-}
-
-static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
- const u32 fixed_kt, u16 v,
- u32 ileakage, u32 *leakage)
-{
- s64 kt, kv, leakage_w, i_leakage, vddc;
-
- i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
- vddc = div64_s64(drm_int2fixp(v), 1000);
-
- kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
- kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
- drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
-
- leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
- *leakage = drm_fixp2int(leakage_w * 1000);
-}
-
-static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
- const struct ni_leakage_coeffients *coeff,
- const u32 fixed_kt,
- u16 v,
- u32 i_leakage,
- u32 *leakage)
-{
- si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
-}
-
-static void si_update_dte_from_pl2(struct amdgpu_device *adev,
- struct si_dte_data *dte_data)
-{
- u32 p_limit1 = adev->pm.dpm.tdp_limit;
- u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
- u32 k = dte_data->k;
- u32 t_max = dte_data->max_t;
- u32 t_split[5] = { 10, 15, 20, 25, 30 };
- u32 t_0 = dte_data->t0;
- u32 i;
-
- if (p_limit2 != 0 && p_limit2 <= p_limit1) {
- dte_data->tdep_count = 3;
-
- for (i = 0; i < k; i++) {
- dte_data->r[i] =
- (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
- (p_limit2 * (u32)100);
- }
-
- dte_data->tdep_r[1] = dte_data->r[4] * 2;
-
- for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
- dte_data->tdep_r[i] = dte_data->r[4];
- }
- } else {
- DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
- }
-}
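
Note: max_t is apparently in degrees C while t0 is in millidegrees, hence the /1000 applied to t_0 only; for the Mars/Sun tables above (t0 = 55000, max_t = 105) that span is 105 - 55 = 50. Each filter-stage resistance then carries a 2^14 fixed-point scale:

    r[i] = t_split[i] * (t_max - t_0/1000) * 16384 / (100 * p_limit2)

As a worked example with t_split[0] = 10 and the 50-degree span, r[0] = 10 * 50 * 16384 / (100 * p_limit2) = 8192000 / p_limit2. The temperature-dependent set is then rebuilt from the last stage: tdep_r[1] = 2 * r[4] and tdep_r[2..] = r[4] (k is 5 in the DTE tables above, so r[4] is the last entry written by the loop).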
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
-{
- struct ni_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
-{
- struct si_ps *ps = aps->ps_priv;
-
- return ps;
-}
-
-static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- bool update_dte_from_pl2 = false;
-
- if (adev->asic_type == CHIP_TAHITI) {
- si_pi->cac_weights = cac_weights_tahiti;
- si_pi->lcac_config = lcac_tahiti;
- si_pi->cac_override = cac_override_tahiti;
- si_pi->powertune_data = &powertune_data_tahiti;
- si_pi->dte_data = dte_data_tahiti;
-
- switch (adev->pdev->device) {
- case 0x6798:
- si_pi->dte_data.enable_dte_by_default = true;
- break;
- case 0x6799:
- si_pi->dte_data = dte_data_new_zealand;
- break;
- case 0x6790:
- case 0x6791:
- case 0x6792:
- case 0x679E:
- si_pi->dte_data = dte_data_aruba_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x679B:
- si_pi->dte_data = dte_data_malta;
- update_dte_from_pl2 = true;
- break;
- case 0x679A:
- si_pi->dte_data = dte_data_tahiti_pro;
- update_dte_from_pl2 = true;
- break;
- default:
- if (si_pi->dte_data.enable_dte_by_default == true)
- DRM_ERROR("DTE is not enabled!\n");
- break;
- }
- } else if (adev->asic_type == CHIP_PITCAIRN) {
- si_pi->cac_weights = cac_weights_pitcairn;
- si_pi->lcac_config = lcac_pitcairn;
- si_pi->cac_override = cac_override_pitcairn;
- si_pi->powertune_data = &powertune_data_pitcairn;
-
- switch (adev->pdev->device) {
- case 0x6810:
- case 0x6818:
- si_pi->dte_data = dte_data_curacao_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6819:
- case 0x6811:
- si_pi->dte_data = dte_data_curacao_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6800:
- case 0x6806:
- si_pi->dte_data = dte_data_neptune_xt;
- update_dte_from_pl2 = true;
- break;
- default:
- si_pi->dte_data = dte_data_pitcairn;
- break;
- }
- } else if (adev->asic_type == CHIP_VERDE) {
- si_pi->lcac_config = lcac_cape_verde;
- si_pi->cac_override = cac_override_cape_verde;
- si_pi->powertune_data = &powertune_data_cape_verde;
-
- switch (adev->pdev->device) {
- case 0x683B:
- case 0x683F:
- case 0x6829:
- case 0x6835:
- si_pi->cac_weights = cac_weights_cape_verde_pro;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x682C:
- si_pi->cac_weights = cac_weights_cape_verde_pro;
- si_pi->dte_data = dte_data_sun_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6825:
- case 0x6827:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x6824:
- case 0x682D:
- si_pi->cac_weights = cac_weights_chelsea_xt;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x682F:
- si_pi->cac_weights = cac_weights_chelsea_pro;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- case 0x6820:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_venus_xtx;
- break;
- case 0x6821:
- si_pi->cac_weights = cac_weights_heathrow;
- si_pi->dte_data = dte_data_venus_xt;
- break;
- case 0x6823:
- case 0x682B:
- case 0x6822:
- case 0x682A:
- si_pi->cac_weights = cac_weights_chelsea_pro;
- si_pi->dte_data = dte_data_venus_pro;
- break;
- default:
- si_pi->cac_weights = cac_weights_cape_verde;
- si_pi->dte_data = dte_data_cape_verde;
- break;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- si_pi->lcac_config = lcac_mars_pro;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_mars_pro;
- si_pi->dte_data = dte_data_mars_pro;
-
- switch (adev->pdev->device) {
- case 0x6601:
- case 0x6621:
- case 0x6603:
- case 0x6605:
- si_pi->cac_weights = cac_weights_mars_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6600:
- case 0x6606:
- case 0x6620:
- case 0x6604:
- si_pi->cac_weights = cac_weights_mars_xt;
- update_dte_from_pl2 = true;
- break;
- case 0x6611:
- case 0x6613:
- case 0x6608:
- si_pi->cac_weights = cac_weights_oland_pro;
- update_dte_from_pl2 = true;
- break;
- case 0x6610:
- si_pi->cac_weights = cac_weights_oland_xt;
- update_dte_from_pl2 = true;
- break;
- default:
- si_pi->cac_weights = cac_weights_oland;
- si_pi->lcac_config = lcac_oland;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_oland;
- si_pi->dte_data = dte_data_oland;
- break;
- }
- } else if (adev->asic_type == CHIP_HAINAN) {
- si_pi->cac_weights = cac_weights_hainan;
- si_pi->lcac_config = lcac_oland;
- si_pi->cac_override = cac_override_oland;
- si_pi->powertune_data = &powertune_data_hainan;
- si_pi->dte_data = dte_data_sun_xt;
- update_dte_from_pl2 = true;
- } else {
- DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
- return;
- }
-
- ni_pi->enable_power_containment = false;
- ni_pi->enable_cac = false;
- ni_pi->enable_sq_ramping = false;
- si_pi->enable_dte = false;
-
- if (si_pi->powertune_data->enable_powertune_by_default) {
- ni_pi->enable_power_containment = true;
- ni_pi->enable_cac = true;
- if (si_pi->dte_data.enable_dte_by_default) {
- si_pi->enable_dte = true;
- if (update_dte_from_pl2)
- si_update_dte_from_pl2(adev, &si_pi->dte_data);
-
- }
- ni_pi->enable_sq_ramping = true;
- }
-
- ni_pi->driver_calculate_cac_leakage = true;
- ni_pi->cac_configuration_required = true;
-
- if (ni_pi->cac_configuration_required) {
- ni_pi->support_cac_long_term_average = true;
- si_pi->dyn_powertune_data.l2_lta_window_size =
- si_pi->powertune_data->l2_lta_window_size_default;
- si_pi->dyn_powertune_data.lts_truncate =
- si_pi->powertune_data->lts_truncate_default;
- } else {
- ni_pi->support_cac_long_term_average = false;
- si_pi->dyn_powertune_data.l2_lta_window_size = 0;
- si_pi->dyn_powertune_data.lts_truncate = 0;
- }
-
- si_pi->dyn_powertune_data.disable_uvd_powertune = false;
-}
-
-static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
-{
- return 1;
-}
-
-static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
-{
- u32 xclk;
- u32 wintime;
- u32 cac_window;
- u32 cac_window_size;
-
- xclk = amdgpu_asic_get_xclk(adev);
-
- if (xclk == 0)
- return 0;
-
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
- cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
-
- wintime = (cac_window_size * 100) / xclk;
-
- return wintime;
-}
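
Note: CAC_WINDOW packs two 16-bit halves, and the window size is their product in XCLK cycles. amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units, so cycles * 100 / xclk yields the window length in microseconds. Illustrative numbers (hypothetical, not from a real VBIOS): a 100 MHz XCLK gives xclk = 10000, so a 50000-cycle window is 50000 * 100 / 10000 = 500 us.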
-
-static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
-{
- return power_in_watts;
-}
-
-static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
- bool adjust_polarity,
- u32 tdp_adjustment,
- u32 *tdp_limit,
- u32 *near_tdp_limit)
-{
- u32 adjustment_delta, max_tdp_limit;
-
- if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
- return -EINVAL;
-
- max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
-
- if (adjust_polarity) {
- *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
- *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
- } else {
- *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
- adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
- if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
- *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
- else
- *near_tdp_limit = 0;
- }
-
- if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
- return -EINVAL;
- if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
- return -EINVAL;
-
- return 0;
-}
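
Note: tdp_adjustment is a percentage bounded by the overdrive limit, and max_tdp_limit is simply 200% of the stock limit ((100 + 100)/100). A worked example (hypothetical numbers): with a stock tdp_limit of 40 units and tdp_adjustment = 10, polarity-up gives *tdp_limit = (110 * 40) / 100 = 44 and raises the near-TDP limit by the same absolute delta of 4; polarity-down gives *tdp_limit = 36 and lowers the near-TDP limit by 4, clamping at 0 if the delta exceeds it. Both results are then sanity-checked against (0, max_tdp_limit] and (0, *tdp_limit] respectively.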
-
-static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (ni_pi->enable_power_containment) {
- SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
- PP_SIslands_PAPMParameters *papm_parm;
- struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
- u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
- u32 tdp_limit;
- u32 near_tdp_limit;
- int ret;
-
- if (scaling_factor == 0)
- return -EINVAL;
-
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
- ret = si_calculate_adjusted_tdp_limits(adev,
- false, /* ??? */
- adev->pm.dpm.tdp_adjustment,
- &tdp_limit,
- &near_tdp_limit);
- if (ret)
- return ret;
-
- smc_table->dpm2Params.TDPLimit =
- cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
- smc_table->dpm2Params.NearTDPLimit =
- cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
- smc_table->dpm2Params.SafePowerLimit =
- cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
- offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
- (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
- sizeof(u32) * 3,
- si_pi->sram_end);
- if (ret)
- return ret;
-
- if (si_pi->enable_ppm) {
- papm_parm = &si_pi->papm_parm;
- memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
- papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
- papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
- papm_parm->dGPU_T_Warning = cpu_to_be32(95);
- papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
- papm_parm->PlatformPowerLimit = 0xffffffff;
- papm_parm->NearTDPLimitPAPM = 0xffffffff;
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
- (u8 *)papm_parm,
- sizeof(PP_SIslands_PAPMParameters),
- si_pi->sram_end);
- if (ret)
- return ret;
- }
- }
- return 0;
-}
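
Note: the amdgpu_si_copy_bytes_to_smc() call above pushes sizeof(u32) * 3 starting at dpm2Params.TDPLimit, which silently assumes TDPLimit, NearTDPLimit and SafePowerLimit are three consecutive u32s in PP_SIslands_DPM2Parameters. A compile-time guard at the top of the function would document that layout assumption; a minimal sketch (the guard is hypothetical and not in the driver -- the layout itself comes from the SMC firmware headers):

    /* Hypothetical guard for the three-word copy above. */
    BUILD_BUG_ON(offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit) !=
                 offsetof(PP_SIslands_DPM2Parameters, TDPLimit) + sizeof(u32));
    BUILD_BUG_ON(offsetof(PP_SIslands_DPM2Parameters, SafePowerLimit) !=
                 offsetof(PP_SIslands_DPM2Parameters, TDPLimit) + 2 * sizeof(u32));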
-
-static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (ni_pi->enable_power_containment) {
- SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
- u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
- int ret;
-
- memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
-
- smc_table->dpm2Params.NearTDPLimit =
- cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
- smc_table->dpm2Params.SafePowerLimit =
- cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- (si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
- offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
- (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
- sizeof(u32) * 2,
- si_pi->sram_end);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
- const u16 prev_std_vddc,
- const u16 curr_std_vddc)
-{
- u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
- u64 prev_vddc = (u64)prev_std_vddc;
- u64 curr_vddc = (u64)curr_std_vddc;
- u64 pwr_efficiency_ratio, n, d;
-
- if ((prev_vddc == 0) || (curr_vddc == 0))
- return 0;
-
- n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
- d = prev_vddc * prev_vddc;
- pwr_efficiency_ratio = div64_u64(n, d);
-
- if (pwr_efficiency_ratio > (u64)0xFFFF)
- return 0;
-
- return (u16)pwr_efficiency_ratio;
-}
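
Note: in effect this computes, in integer math,

    ratio = floor(1024 * (curr_vddc / prev_vddc)^2 * (1000 + margin) / 1000)

i.e. the squared voltage ratio in 1024ths with a small safety margin on top, clamped to 0 on overflow past 16 bits. Worked example: with equal voltages and an assumed margin of 10 (the SISLANDS header value is not shown here), ratio = floor(1024 * 1010 / 1000) = 1034.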
-
-static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
- amdgpu_state->vclk && amdgpu_state->dclk)
- return true;
-
- return false;
-}
-
-static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
-{
- struct evergreen_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static int si_populate_power_containment_values(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- SISLANDS_SMC_VOLTAGE_VALUE vddc;
- u32 prev_sclk;
- u32 max_sclk;
- u32 min_sclk;
- u16 prev_std_vddc;
- u16 curr_std_vddc;
- int i;
- u16 pwr_efficiency_ratio;
- u8 max_ps_percent;
- bool disable_uvd_power_tune;
- int ret;
-
- if (ni_pi->enable_power_containment == false)
- return 0;
-
- if (state->performance_level_count == 0)
- return -EINVAL;
-
- if (smc_state->levelCount != state->performance_level_count)
- return -EINVAL;
-
- disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
-
- smc_state->levels[0].dpm2.MaxPS = 0;
- smc_state->levels[0].dpm2.NearTDPDec = 0;
- smc_state->levels[0].dpm2.AboveSafeInc = 0;
- smc_state->levels[0].dpm2.BelowSafeInc = 0;
- smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- for (i = 1; i < state->performance_level_count; i++) {
- prev_sclk = state->performance_levels[i-1].sclk;
- max_sclk = state->performance_levels[i].sclk;
- if (i == 1)
- max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
- else
- max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
-
- if (prev_sclk > max_sclk)
- return -EINVAL;
-
- if ((max_ps_percent == 0) ||
- (prev_sclk == max_sclk) ||
- disable_uvd_power_tune)
- min_sclk = max_sclk;
- else if (i == 1)
- min_sclk = prev_sclk;
- else
- min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
-
- if (min_sclk < state->performance_levels[0].sclk)
- min_sclk = state->performance_levels[0].sclk;
-
- if (min_sclk == 0)
- return -EINVAL;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- state->performance_levels[i-1].vddc, &vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
- if (ret)
- return ret;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- state->performance_levels[i].vddc, &vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
- if (ret)
- return ret;
-
- pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
- prev_std_vddc, curr_std_vddc);
-
- smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
- smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
- smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
- smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
- smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
- }
-
- return 0;
-}
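
Note: per level, MaxPS is the relative sclk drop to the previous level scaled by SISLANDS_DPM2_MAX_PULSE_SKIP -- e.g. if min_sclk works out to 75% of max_sclk, MaxPS = MAX_PULSE_SKIP / 4 -- while the _M/_H percent constants decide how far min_sclk may fall below the previous level's sclk (the first transition uses the previous sclk unmodified). The PwrEfficiencyRatio computed above presumably lets the SMC's DPM2 logic judge when dropping a level actually saves power.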
-
-static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- u32 sq_power_throttle, sq_power_throttle2;
- bool enable_sq_ramping = ni_pi->enable_sq_ramping;
- int i;
-
- if (state->performance_level_count == 0)
- return -EINVAL;
-
- if (smc_state->levelCount != state->performance_level_count)
- return -EINVAL;
-
- if (adev->pm.dpm.sq_ramping_threshold == 0)
- return -EINVAL;
-
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
- enable_sq_ramping = false;
-
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
- enable_sq_ramping = false;
-
- for (i = 0; i < state->performance_level_count; i++) {
- sq_power_throttle = 0;
- sq_power_throttle2 = 0;
-
- if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
- enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
- } else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- }
-
- smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
- smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
- }
-
- return 0;
-}
-
-static int si_enable_power_containment(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- bool enable)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (ni_pi->enable_power_containment) {
- if (enable) {
- if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- ni_pi->pc_enabled = false;
- } else {
- ni_pi->pc_enabled = true;
- }
- }
- } else {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- ni_pi->pc_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret = 0;
- struct si_dte_data *dte_data = &si_pi->dte_data;
- Smc_SIslands_DTE_Configuration *dte_tables = NULL;
- u32 table_size;
- u8 tdep_count;
- u32 i;
-
- if (dte_data == NULL)
- si_pi->enable_dte = false;
-
- if (si_pi->enable_dte == false)
- return 0;
-
- if (dte_data->k <= 0)
- return -EINVAL;
-
- dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
- if (dte_tables == NULL) {
- si_pi->enable_dte = false;
- return -ENOMEM;
- }
-
- table_size = dte_data->k;
-
- if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
- table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
-
- tdep_count = dte_data->tdep_count;
- if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
- tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
-
- dte_tables->K = cpu_to_be32(table_size);
- dte_tables->T0 = cpu_to_be32(dte_data->t0);
- dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
- dte_tables->WindowSize = dte_data->window_size;
- dte_tables->temp_select = dte_data->temp_select;
- dte_tables->DTE_mode = dte_data->dte_mode;
- dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
-
- if (tdep_count > 0)
- table_size--;
-
- for (i = 0; i < table_size; i++) {
- dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
- dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
- }
-
- dte_tables->Tdep_count = tdep_count;
-
- for (i = 0; i < (u32)tdep_count; i++) {
- dte_tables->T_limits[i] = dte_data->t_limits[i];
- dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
- dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
- }
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
- (u8 *)dte_tables,
- sizeof(Smc_SIslands_DTE_Configuration),
- si_pi->sram_end);
- kfree(dte_tables);
-
- return ret;
-}
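
Note: all multi-byte fields are converted with cpu_to_be32() because the SMC consumes big-endian data; the single-byte fields (WindowSize, temp_select, DTE_mode, Tdep_count, T_limits) are copied as-is. On a little-endian host the swap looks like this (illustrative value):

    u32 k = 5;
    __be32 wire = cpu_to_be32(k);	/* stored in memory as 00 00 00 05 */

Also worth noting: when temperature-dependent entries exist, table_size is decremented, so the last tau[]/R[] filter stage is left zeroed (the table was kzalloc'ed), apparently in favor of the Tdep_* set.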
-
-static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
- u16 *max, u16 *min)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_cac_leakage_table *table =
- &adev->pm.dpm.dyn_state.cac_leakage_table;
- u32 i;
- u32 v0_loadline;
-
- if (table == NULL)
- return -EINVAL;
-
- *max = 0;
- *min = 0xFFFF;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].vddc > *max)
- *max = table->entries[i].vddc;
- if (table->entries[i].vddc < *min)
- *min = table->entries[i].vddc;
- }
-
- if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
- return -EINVAL;
-
- v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
-
- if (v0_loadline > 0xFFFFUL)
- return -EINVAL;
-
- *min = (u16)v0_loadline;
-
- if ((*min > *max) || (*max == 0) || (*min == 0))
- return -EINVAL;
-
- return 0;
-}
-
-static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
-{
- return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
- SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
-}
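
Note: this is just a ceiling division -- equivalent to DIV_ROUND_UP(max - min, SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES) -- so the LUT's fixed number of voltage columns always spans at least [min, max]. Worked example with 16 columns (the header value, assumed here): max = 1100 mV, min = 850 mV gives step = (250 + 15) / 16 = 16 mV, and the caller then re-derives vddc_min = 1100 - 16 * 15 = 860 mV so the 16 entries end exactly at vddc_max.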
-
-static int si_init_dte_leakage_table(struct amdgpu_device *adev,
- PP_SIslands_CacConfig *cac_tables,
- u16 vddc_max, u16 vddc_min, u16 vddc_step,
- u16 t0, u16 t_step)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 leakage;
- unsigned int i, j;
- s32 t;
- u32 smc_leakage;
- u32 scaling_factor;
- u16 voltage;
-
- scaling_factor = si_get_smc_power_scaling_factor(adev);
-
- for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
- t = (1000 * (i * t_step + t0));
-
- for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
- voltage = vddc_max - (vddc_step * j);
-
- si_calculate_leakage_for_v_and_t(adev,
- &si_pi->powertune_data->leakage_coefficients,
- voltage,
- t,
- si_pi->dyn_powertune_data.cac_leakage,
- &leakage);
-
- smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
-
- if (smc_leakage > 0xFFFF)
- smc_leakage = 0xFFFF;
-
- cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
- cpu_to_be16((u16)smc_leakage);
- }
- }
- return 0;
-}
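
Note: j walks the voltages downward from vddc_max, but each result is stored mirrored at column N-1-j, so the finished LUT rows run from vddc_min (column 0) up to vddc_max (last column), matching the lkge_lut_V0/lkge_lut_Vstep values programmed into the CAC config later.

    /* Column mapping used above, N = SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES:
     *   j = 0     -> voltage = vddc_max              -> column N-1
     *   j = N-1   -> voltage = vddc_max - (N-1)*step -> column 0
     */

The simplified variant below computes a single voltage row the same way and replicates it across every temperature row.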
-
-static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
- PP_SIslands_CacConfig *cac_tables,
- u16 vddc_max, u16 vddc_min, u16 vddc_step)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 leakage;
- unsigned int i, j;
- u32 smc_leakage;
- u32 scaling_factor;
- u16 voltage;
-
- scaling_factor = si_get_smc_power_scaling_factor(adev);
-
- for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
- voltage = vddc_max - (vddc_step * j);
-
- si_calculate_leakage_for_v(adev,
- &si_pi->powertune_data->leakage_coefficients,
- si_pi->powertune_data->fixed_kt,
- voltage,
- si_pi->dyn_powertune_data.cac_leakage,
- &leakage);
-
- smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
-
- if (smc_leakage > 0xFFFF)
- smc_leakage = 0xFFFF;
-
- for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
- cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
- cpu_to_be16((u16)smc_leakage);
- }
- return 0;
-}
-
-static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- PP_SIslands_CacConfig *cac_tables = NULL;
- u16 vddc_max, vddc_min, vddc_step;
- u16 t0, t_step;
- u32 load_line_slope, reg;
- int ret = 0;
- u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
-
- if (ni_pi->enable_cac == false)
- return 0;
-
- cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
- if (!cac_tables)
- return -ENOMEM;
-
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
-
- si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
- si_pi->dyn_powertune_data.dc_pwr_value =
- si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
- si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
- si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
-
- si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
-
- ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
- if (ret)
- goto done_free;
-
- vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
- vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
- t_step = 4;
- t0 = 60;
-
- if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
- ret = si_init_dte_leakage_table(adev, cac_tables,
- vddc_max, vddc_min, vddc_step,
- t0, t_step);
- else
- ret = si_init_simplified_leakage_table(adev, cac_tables,
- vddc_max, vddc_min, vddc_step);
- if (ret)
- goto done_free;
-
- load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
-
- cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
- cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
- cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
- cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
- cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
- cac_tables->R_LL = cpu_to_be32(load_line_slope);
- cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
- cac_tables->calculation_repeats = cpu_to_be32(2);
- cac_tables->dc_cac = cpu_to_be32(0);
- cac_tables->log2_PG_LKG_SCALE = 12;
- cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
- cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
- cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
- (u8 *)cac_tables,
- sizeof(PP_SIslands_CacConfig),
- si_pi->sram_end);
-
- if (ret)
- goto done_free;
-
- ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
-
-done_free:
- if (ret) {
- ni_pi->enable_cac = false;
- ni_pi->enable_power_containment = false;
- }
-
- kfree(cac_tables);
-
- return ret;
-}
-
-static int si_program_cac_config_registers(struct amdgpu_device *adev,
- const struct si_cac_config_reg *cac_config_regs)
-{
- const struct si_cac_config_reg *config_regs = cac_config_regs;
- u32 data = 0, offset;
-
- if (!config_regs)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- switch (config_regs->type) {
- case SISLANDS_CACCONFIG_CGIND:
- offset = SMC_CG_IND_START + config_regs->offset;
- if (offset < SMC_CG_IND_END)
- data = RREG32_SMC(offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
-
- switch (config_regs->type) {
- case SISLANDS_CACCONFIG_CGIND:
- offset = SMC_CG_IND_START + config_regs->offset;
- if (offset < SMC_CG_IND_END)
- WREG32_SMC(offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- config_regs++;
- }
- return 0;
-}
-
-static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
-
-	if (!ni_pi->enable_cac ||
-	    !ni_pi->cac_configuration_required)
-		return 0;
-
- ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
- if (ret)
- return ret;
- ret = si_program_cac_config_registers(adev, si_pi->cac_override);
- if (ret)
- return ret;
- ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int si_enable_smc_cac(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- bool enable)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (ni_pi->enable_cac) {
- if (enable) {
- if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
- if (ni_pi->support_cac_long_term_average) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
- if (smc_result != PPSMC_Result_OK)
- ni_pi->support_cac_long_term_average = false;
- }
-
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- ni_pi->cac_enabled = false;
- } else {
- ni_pi->cac_enabled = true;
- }
-
- if (si_pi->enable_dte) {
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- }
- }
- } else if (ni_pi->cac_enabled) {
- if (si_pi->enable_dte)
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
-
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
-
- ni_pi->cac_enabled = false;
-
- if (ni_pi->support_cac_long_term_average)
- smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
- }
- }
- return ret;
-}
-
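-/*
- * Build the 256-entry SPLL divider lookup table and upload it to SMC
- * SRAM.  Entries are spaced 512 sclk units apart (sclk is in 10 kHz
- * units, so 5.12 MHz steps); each packs the post/feedback dividers
- * plus the spread-spectrum CLK_S/CLK_V parameters for that clock.
- */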
-static int si_init_smc_spll_table(struct amdgpu_device *adev)
-{
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
- SISLANDS_SMC_SCLK_VALUE sclk_params;
- u32 fb_div, p_div;
- u32 clk_s, clk_v;
- u32 sclk = 0;
- int ret = 0;
- u32 tmp;
- int i;
-
- if (si_pi->spll_table_start == 0)
- return -EINVAL;
-
- spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
- if (spll_table == NULL)
- return -ENOMEM;
-
- for (i = 0; i < 256; i++) {
- ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
- if (ret)
- break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
-
- fb_div &= ~0x00001FFF;
- fb_div >>= 1;
- clk_v >>= 6;
-
- if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
- ret = -EINVAL;
- if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
- ret = -EINVAL;
- if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
- ret = -EINVAL;
- if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
- ret = -EINVAL;
-
- if (ret)
- break;
-
- tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
- ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
- spll_table->freq[i] = cpu_to_be32(tmp);
-
- tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
- ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
- spll_table->ss[i] = cpu_to_be32(tmp);
-
- sclk += 512;
- }
-
- if (!ret)
- ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
- (u8 *)spll_table,
- sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
- si_pi->sram_end);
-
- if (ret)
- ni_pi->enable_power_containment = false;
-
- kfree(spll_table);
-
- return ret;
-}
-
-static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
- u16 vce_voltage)
-{
- u16 highest_leakage = 0;
- struct si_power_info *si_pi = si_get_pi(adev);
- int i;
-
-	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
- if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
- highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
- }
-
- if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
- return highest_leakage;
-
- return vce_voltage;
-}
-
-static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
- u32 evclk, u32 ecclk, u16 *voltage)
-{
- u32 i;
- int ret = -EINVAL;
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- if (((evclk == 0) && (ecclk == 0)) ||
- (table && (table->count == 0))) {
- *voltage = 0;
- return 0;
- }
-
- for (i = 0; i < table->count; i++) {
- if ((evclk <= table->entries[i].evclk) &&
- (ecclk <= table->entries[i].ecclk)) {
- *voltage = table->entries[i].v;
- ret = 0;
- break;
- }
- }
-
- /* if no match return the highest voltage */
- if (ret)
- *voltage = table->entries[table->count - 1].v;
-
- *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
-
- return ret;
-}
-
-static bool si_dpm_vblank_too_short(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we never hit the non-gddr5 limit so disable it */
- u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
-
-	return vblank_time < switch_limit;
-}
-
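-/*
- * Copy the MC arbiter DRAM timing registers from one arbitration set
- * (F0-F3) to another, then request a switch to the destination set.
- */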
-static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
- u32 arb_freq_src, u32 arb_freq_dest)
-{
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 burst_time;
- u32 mc_cg_config;
-
- switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F2:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
- break;
- case MC_CG_ARB_FREQ_F3:
- mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
- mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
- burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
- WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
- break;
- case MC_CG_ARB_FREQ_F1:
- WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
- break;
- case MC_CG_ARB_FREQ_F2:
- WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
- break;
- case MC_CG_ARB_FREQ_F3:
- WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
- WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
- WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
- WREG32(MC_CG_CONFIG, mc_cg_config);
- WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
-
- return 0;
-}
-
-static void ni_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *new_ps = si_get_ps(rps);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
-
- eg_pi->current_rps = *rps;
- ni_pi->current_ps = *new_ps;
- eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
- adev->pm.dpm.current_ps = &eg_pi->current_rps;
-}
-
-static void ni_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *new_ps = si_get_ps(rps);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
-
- eg_pi->requested_rps = *rps;
- ni_pi->requested_ps = *new_ps;
- eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
- adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
-}
-
-static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_ps,
- struct amdgpu_ps *old_ps)
-{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
-
- if ((new_ps->vclk == old_ps->vclk) &&
- (new_ps->dclk == old_ps->dclk))
- return;
-
- if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
- current_state->performance_levels[current_state->performance_level_count - 1].sclk)
- return;
-
- amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_ps,
- struct amdgpu_ps *old_ps)
-{
- struct si_ps *new_state = si_get_ps(new_ps);
- struct si_ps *current_state = si_get_ps(old_ps);
-
- if ((new_ps->vclk == old_ps->vclk) &&
- (new_ps->dclk == old_ps->dclk))
- return;
-
- if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
- current_state->performance_levels[current_state->performance_level_count - 1].sclk)
- return;
-
- amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
-{
- unsigned int i;
-
- for (i = 0; i < table->count; i++)
- if (voltage <= table->entries[i].value)
- return table->entries[i].value;
-
- return table->entries[table->count - 1].value;
-}
-
-static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
- u32 max_clock, u32 requested_clock)
-{
- unsigned int i;
-
- if ((clocks == NULL) || (clocks->count == 0))
- return (requested_clock < max_clock) ? requested_clock : max_clock;
-
- for (i = 0; i < clocks->count; i++) {
- if (clocks->values[i] >= requested_clock)
- return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
- }
-
- return (clocks->values[clocks->count - 1] < max_clock) ?
- clocks->values[clocks->count - 1] : max_clock;
-}
-
-static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
- u32 max_mclk, u32 requested_mclk)
-{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
- max_mclk, requested_mclk);
-}
-
-static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
- u32 max_sclk, u32 requested_sclk)
-{
- return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
- max_sclk, requested_sclk);
-}
-
-static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
- u32 *max_clock)
-{
- u32 i, clock = 0;
-
- if ((table == NULL) || (table->count == 0)) {
- *max_clock = clock;
- return;
- }
-
- for (i = 0; i < table->count; i++) {
- if (clock < table->entries[i].clk)
- clock = table->entries[i].clk;
- }
- *max_clock = clock;
-}
-
-static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
- u32 clock, u16 max_voltage, u16 *voltage)
-{
- u32 i;
-
- if ((table == NULL) || (table->count == 0))
- return;
-
-	for (i = 0; i < table->count; i++) {
- if (clock <= table->entries[i].clk) {
- if (*voltage < table->entries[i].v)
- *voltage = (u16)((table->entries[i].v < max_voltage) ?
- table->entries[i].v : max_voltage);
- return;
- }
- }
-
- *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
-}
-
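-/*
- * Keep a performance level's mclk/sclk pair within platform limits:
- * raise sclk when mclk exceeds the allowed mclk:sclk ratio, or raise
- * mclk when sclk runs more than sclk_mclk_delta ahead of it.
- */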
-static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
- const struct amdgpu_clock_and_voltage_limits *max_limits,
- struct rv7xx_pl *pl)
-{
-	if ((pl->mclk == 0) || (pl->sclk == 0))
- return;
-
- if (pl->mclk == pl->sclk)
- return;
-
- if (pl->mclk > pl->sclk) {
- if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
- pl->sclk = btc_get_valid_sclk(adev,
- max_limits->sclk,
- (pl->mclk +
- (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
- adev->pm.dpm.dyn_state.mclk_sclk_ratio);
- } else {
- if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
- pl->mclk = btc_get_valid_mclk(adev,
- max_limits->mclk,
- pl->sclk -
- adev->pm.dpm.dyn_state.sclk_mclk_delta);
- }
-}
-
-static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
- u16 max_vddc, u16 max_vddci,
- u16 *vddc, u16 *vddci)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- u16 new_voltage;
-
-	if ((*vddc == 0) || (*vddci == 0))
- return;
-
- if (*vddc > *vddci) {
- if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
- (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
- }
- } else {
- if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
- new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
- (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
- *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
- }
- }
-}
-
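-/*
- * Derive the (p, u) pair used in the bSP/bSU style registers from a
- * percentage i of reference clock r_c: with i_c = i * r_c / 100, u is
- * roughly half the bit width of (i_c >> p_b) and p = i_c / 4^u, so
- * that p << (2 * u) approximates i_c.
- */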
-static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u)
-{
- u32 b_c = 0;
- u32 i_c;
- u32 tmp;
-
- i_c = (i * r_c) / 100;
- tmp = i_c >> p_b;
-
- while (tmp) {
- b_c++;
- tmp >>= 1;
- }
-
- *u = (b_c + 1) / 2;
- *p = i_c / (1 << (2 * (*u)));
-}
-
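-/*
- * Split the hysteresis window h around target t into the asymmetric
- * low/high trip points *tl/*th, weighted by the fh/fl clock ratio.
- */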
-static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
- u32 k, a, ah, al;
- u32 t1;
-
- if ((fl == 0) || (fh == 0) || (fl > fh))
- return -EINVAL;
-
- k = (100 * fh) / fl;
- t1 = (t * (k - 100));
- a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
- a = (a + 5) / 10;
- ah = ((a * t) + 5000) / 10000;
- al = a - ah;
-
- *th = t - ah;
- *tl = t + al;
-
- return 0;
-}
-
-static bool r600_is_uvd_state(u32 class, u32 class2)
-{
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return true;
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return true;
- return false;
-}
-
-static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
-{
- return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
-}
-
-static void rv770_get_max_vddc(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- u16 vddc;
-
- if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
- pi->max_vddc = 0;
- else
- pi->max_vddc = vddc;
-}
-
-static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct amdgpu_atom_ss ss;
-
- pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, 0);
- pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, 0);
-
- if (pi->sclk_ss || pi->mclk_ss)
- pi->dynamic_ss = true;
- else
- pi->dynamic_ss = false;
-}
-
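-/*
- * Clamp a requested power state to what the board can actually run:
- * apply the per-SKU sclk/mclk caps, DC and voltage-dependency limits,
- * disable mclk/sclk switching where it would be visible (multiple
- * CRTCs, short vblank, UVD/VCE active) and keep the performance
- * levels monotonic.
- */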
-static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct si_ps *ps = si_get_ps(rps);
- struct amdgpu_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching = false;
- bool disable_sclk_switching = false;
- u32 mclk, sclk;
- u16 vddc, vddci, min_vce_voltage = 0;
- u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
- u32 max_sclk = 0, max_mclk = 0;
- int i;
-
- if (adev->asic_type == CHIP_HAINAN) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6664) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)) {
- max_sclk = 75000;
- }
- if ((adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6665)) {
- max_sclk = 60000;
- max_mclk = 80000;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- }
- }
-
- if (rps->vce_active) {
- rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
- &min_vce_voltage);
- } else {
- rps->evclk = 0;
- rps->ecclk = 0;
- }
-
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
- si_dpm_vblank_too_short(adev))
- disable_mclk_switching = true;
-
- if (rps->vclk || rps->dclk) {
- disable_mclk_switching = true;
- disable_sclk_switching = true;
- }
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- for (i = ps->performance_level_count - 2; i >= 0; i--) {
- if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
- }
-	if (!adev->pm.ac_power) {
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk > max_limits->mclk)
- ps->performance_levels[i].mclk = max_limits->mclk;
- if (ps->performance_levels[i].sclk > max_limits->sclk)
- ps->performance_levels[i].sclk = max_limits->sclk;
- if (ps->performance_levels[i].vddc > max_limits->vddc)
- ps->performance_levels[i].vddc = max_limits->vddc;
- if (ps->performance_levels[i].vddci > max_limits->vddci)
- ps->performance_levels[i].vddci = max_limits->vddci;
- }
- }
-
- /* limit clocks to max supported clocks based on voltage dependency tables */
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- &max_sclk_vddc);
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &max_mclk_vddci);
- btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &max_mclk_vddc);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (max_sclk_vddc) {
- if (ps->performance_levels[i].sclk > max_sclk_vddc)
- ps->performance_levels[i].sclk = max_sclk_vddc;
- }
- if (max_mclk_vddci) {
- if (ps->performance_levels[i].mclk > max_mclk_vddci)
- ps->performance_levels[i].mclk = max_mclk_vddci;
- }
- if (max_mclk_vddc) {
- if (ps->performance_levels[i].mclk > max_mclk_vddc)
- ps->performance_levels[i].mclk = max_mclk_vddc;
- }
- if (max_mclk) {
- if (ps->performance_levels[i].mclk > max_mclk)
- ps->performance_levels[i].mclk = max_mclk;
- }
- if (max_sclk) {
- if (ps->performance_levels[i].sclk > max_sclk)
- ps->performance_levels[i].sclk = max_sclk;
- }
- }
-
- /* XXX validate the min clocks required for display */
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
- } else {
- mclk = ps->performance_levels[0].mclk;
- vddci = ps->performance_levels[0].vddci;
- }
-
- if (disable_sclk_switching) {
- sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
- vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
- } else {
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
- }
-
- if (rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
- mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
- }
-
- /* adjusted low state */
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
- ps->performance_levels[0].vddc = vddc;
- ps->performance_levels[0].vddci = vddci;
-
- if (disable_sclk_switching) {
- sclk = ps->performance_levels[0].sclk;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (sclk < ps->performance_levels[i].sclk)
- sclk = ps->performance_levels[i].sclk;
- }
- for (i = 0; i < ps->performance_level_count; i++) {
- ps->performance_levels[i].sclk = sclk;
- ps->performance_levels[i].vddc = vddc;
- }
- } else {
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
- }
- }
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[0].mclk;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (mclk < ps->performance_levels[i].mclk)
- mclk = ps->performance_levels[i].mclk;
- }
- for (i = 0; i < ps->performance_level_count; i++) {
- ps->performance_levels[i].mclk = mclk;
- ps->performance_levels[i].vddci = vddci;
- }
- } else {
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
- ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
- if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
- ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
- }
- }
-
- for (i = 0; i < ps->performance_level_count; i++)
- btc_adjust_clock_combinations(adev, max_limits,
- &ps->performance_levels[i]);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].vddc < min_vce_voltage)
- ps->performance_levels[i].vddc = min_vce_voltage;
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- ps->performance_levels[i].sclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- ps->performance_levels[i].mclk,
- max_limits->vddci, &ps->performance_levels[i].vddci);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- ps->performance_levels[i].mclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
- max_limits->vddc, &ps->performance_levels[i].vddc);
- }
-
- for (i = 0; i < ps->performance_level_count; i++) {
- btc_apply_voltage_delta_rules(adev,
- max_limits->vddc, max_limits->vddci,
- &ps->performance_levels[i].vddc,
- &ps->performance_levels[i].vddci);
- }
-
- ps->dc_compatible = true;
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
- ps->dc_compatible = false;
- }
-}
-
-#if 0
-static int si_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- return amdgpu_si_read_smc_sram_dword(adev,
- si_pi->soft_regs_start + reg_offset, value,
- si_pi->sram_end);
-}
-#endif
-
-static int si_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- return amdgpu_si_write_smc_sram_dword(adev,
- si_pi->soft_regs_start + reg_offset,
- value, si_pi->sram_end);
-}
-
-static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
-{
- bool ret = false;
- u32 tmp, width, row, column, bank, density;
- bool is_memory_gddr5, is_special;
-
- tmp = RREG32(MC_SEQ_MISC0);
- is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
-	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) &&
-		     (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
-
- WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
- width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
-
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
-
- density = (1 << (row + column - 20 + bank)) * width;
-
- if ((adev->pdev->device == 0x6819) &&
- is_memory_gddr5 && is_special && (density == 0x400))
- ret = true;
-
- return ret;
-}
-
-static void si_get_leakage_vddc(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u16 vddc, count = 0;
- int i, ret;
-
- for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
- ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
-
- if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
- si_pi->leakage_voltage.entries[count].voltage = vddc;
- si_pi->leakage_voltage.entries[count].leakage_index =
- SISLANDS_LEAKAGE_INDEX0 + i;
- count++;
- }
- }
- si_pi->leakage_voltage.count = count;
-}
-
-static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
- u32 index, u16 *leakage_voltage)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- int i;
-
- if (leakage_voltage == NULL)
- return -EINVAL;
-
- if ((index & 0xff00) != 0xff00)
- return -EINVAL;
-
- if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
- return -EINVAL;
-
- if (index < SISLANDS_LEAKAGE_INDEX0)
- return -EINVAL;
-
- for (i = 0; i < si_pi->leakage_voltage.count; i++) {
- if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
- *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
- return 0;
- }
- }
- return -EAGAIN;
-}
-
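-/*
- * Translate the set of active auto-throttle sources into a thermal
- * protection setup: pick the DPM event source and gate the
- * THERMAL_PROTECTION_DIS bit accordingly.
- */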
-static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
-
- switch (sources) {
- case 0:
- default:
- want_thermal_protection = false;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
- break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
- break;
- }
-
- if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
- if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
- } else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
- }
-}
-
-static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
- bool enable)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- if (enable) {
- if (!(pi->active_auto_throttle_sources & (1 << source))) {
- pi->active_auto_throttle_sources |= 1 << source;
- si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- } else {
- if (pi->active_auto_throttle_sources & (1 << source)) {
- pi->active_auto_throttle_sources &= ~(1 << source);
- si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- }
-}
-
-static void si_start_dpm(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
-}
-
-static void si_stop_dpm(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
-}
-
-static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
- else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
-}
-
-#if 0
-static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
- u32 thermal_level)
-{
- PPSMC_Result ret;
-
- if (thermal_level == 0) {
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
- if (ret == PPSMC_Result_OK)
- return 0;
- else
- return -EINVAL;
- }
- return 0;
-}
-
-static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
-{
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
-}
-#endif
-
-#if 0
-static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
-{
- if (ac_power)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
-}
-#endif
-
-static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
- WREG32(SMC_SCRATCH0, parameter);
- return amdgpu_si_send_msg_to_smc(adev, msg);
-}
-
-static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_dpm_force_performance_level(void *handle,
- enum amd_dpm_forced_level level)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
- struct si_ps *ps = si_get_ps(rps);
- u32 levels = ps->performance_level_count;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
- return -EINVAL;
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
- return -EINVAL;
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
- return -EINVAL;
-
- if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-#if 0
-static int si_set_boot_state(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-#endif
-
-static int si_set_sw_state(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_halt_smc(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_resume_smc(struct amdgpu_device *adev)
-{
- if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
- return -EINVAL;
-
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static void si_dpm_start_smc(struct amdgpu_device *adev)
-{
- amdgpu_si_program_jump_on_start(adev);
- amdgpu_si_start_smc(adev);
- amdgpu_si_smc_clock(adev, true);
-}
-
-static void si_dpm_stop_smc(struct amdgpu_device *adev)
-{
- amdgpu_si_reset_smc(adev);
- amdgpu_si_smc_clock(adev, false);
-}
-
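-/*
- * Read the SMC firmware header from SMC SRAM and cache the offsets of
- * the tables the driver uploads later (state, soft registers, MC reg,
- * fan, arb, CAC, DTE, SPLL and PAPM tables).
- */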
-static int si_process_firmware_header(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->state_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->soft_regs_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->mc_reg_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->fan_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->arb_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->cac_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->dte_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->spll_table_start = tmp;
-
- ret = amdgpu_si_read_smc_sram_dword(adev,
- SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
- SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- si_pi->papm_cfg_table_start = tmp;
-
- return ret;
-}
-
-static void si_read_clock_registers(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
- si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
- si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
- si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
- si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
- si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
- si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
- si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
- si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
- si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
-}
-
-static void si_enable_thermal_protection(struct amdgpu_device *adev,
- bool enable)
-{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
- else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
-}
-
-static void si_enable_acpi_power_management(struct amdgpu_device *adev)
-{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
-}
-
-#if 0
-static int si_enter_ulp_state(struct amdgpu_device *adev)
-{
- WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
-
- udelay(25000);
-
- return 0;
-}
-
-static int si_exit_ulp_state(struct amdgpu_device *adev)
-{
- int i;
-
- WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
-
- udelay(7000);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SMC_RESP_0) == 1)
- break;
- udelay(1000);
- }
-
- return 0;
-}
-#endif
-
-static int si_notify_smc_display_change(struct amdgpu_device *adev,
- bool has_display)
-{
- PPSMC_Msg msg = has_display ?
- PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
-
- return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
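-/*
- * Program the SMC timeouts in reference-clock ticks: xclk is in
- * 10 kHz units, so (time_in_us * xclk) / 100 converts microseconds
- * to ticks.
- */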
-static void si_program_response_times(struct amdgpu_device *adev)
-{
- u32 voltage_response_time, acpi_delay_time, vbi_time_out;
- u32 vddc_dly, acpi_dly, vbi_dly;
- u32 reference_clock;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
-
- voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
-
- if (voltage_response_time == 0)
- voltage_response_time = 1000;
-
- acpi_delay_time = 15000;
- vbi_time_out = 100000;
-
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- vddc_dly = (voltage_response_time * reference_clock) / 100;
- acpi_dly = (acpi_delay_time * reference_clock) / 100;
- vbi_dly = (vbi_time_out * reference_clock) / 100;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
-}
-
-static void si_program_ds_registers(struct amdgpu_device *adev)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- u32 tmp;
-
- /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
- if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
- tmp = 0x10;
- else
- tmp = 0x1;
-
- if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
- }
-}
-
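-/*
- * Route display-gap (vblank) detection to an active CRTC so memory
- * reclocking happens during blanking, and tell the SMC whether any
- * display is active at all.
- */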
-static void si_program_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp, pipe;
- int i;
-
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
- else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
-
- if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
- else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
-
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
-
- tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
- pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
-
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
-
- tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
- tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
- WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
- }
-
-	/*
-	 * Telling the SMC that no display is active forces the performance
-	 * state to low even when the CRTCs are merely disabled.  This can be
-	 * a problem on PowerXpress systems or when the card is used for
-	 * offscreen rendering or compute with no CRTCs enabled.
-	 */
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
-}
-
-static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- if (enable) {
- if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
- } else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
- }
-}
-
-static void si_setup_bsp(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- r600_calculate_u_and_p(pi->asi,
- xclk,
- 16,
- &pi->bsp,
- &pi->bsu);
-
- r600_calculate_u_and_p(pi->pasi,
- xclk,
- 16,
- &pi->pbsp,
- &pi->pbsu);
-
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
-
- WREG32(CG_BSP, pi->dsp);
-}
-
-static void si_program_git(struct amdgpu_device *adev)
-{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
-}
-
-static void si_program_tp(struct amdgpu_device *adev)
-{
- int i;
- enum r600_td td = R600_TD_DFLT;
-
- for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
-
- if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
- else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
-
- if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
-
- if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
-}
-
-static void si_program_tpp(struct amdgpu_device *adev)
-{
- WREG32(CG_TPC, R600_TPC_DFLT);
-}
-
-static void si_program_sstp(struct amdgpu_device *adev)
-{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
-}
-
-static void si_enable_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
-
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
-
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
-}
-
-static void si_program_vc(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- WREG32(CG_FTV, pi->vrc);
-}
-
-static void si_clear_vc(struct amdgpu_device *adev)
-{
- WREG32(CG_FTV, 0);
-}
-
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
-{
- u8 mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
- return mc_para_index;
-}
-
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
-{
- u8 mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 60000) / 5000);
- }
- return mc_para_index;
-}
-
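-/*
- * Pick the MCLK frequency-ratio index for a memory clock and, at or
- * below the strobe mode threshold, set SISLANDS_SMC_STROBE_ENABLE.
- */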
-static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- bool strobe_mode = false;
- u8 result = 0;
-
- if (mclk <= pi->mclk_strobe_mode_threshold)
- strobe_mode = true;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
- else
- result = si_get_ddr3_mclk_frequency_ratio(mclk);
-
- if (strobe_mode)
- result |= SISLANDS_SMC_STROBE_ENABLE;
-
- return result;
-}
-
-static int si_upload_firmware(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- amdgpu_si_reset_smc(adev);
- amdgpu_si_smc_clock(adev, false);
-
- return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
-}
-
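-/*
- * A phase-shedding mask with N bits set addresses 2^N phase levels,
- * so the voltage table must have exactly 2^N entries and the limits
- * table one fewer (one threshold between each adjacent pair).
- */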
-static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- const struct amdgpu_phase_shedding_limits_table *limits)
-{
- u32 data, num_bits, num_levels;
-
- if ((table == NULL) || (limits == NULL))
- return false;
-
- data = table->mask_low;
-
- num_bits = hweight32(data);
-
- if (num_bits == 0)
- return false;
-
- num_levels = (1 << num_bits);
-
- if (table->count != num_levels)
- return false;
-
- if (limits->count != (num_levels - 1))
- return false;
-
- return true;
-}
-
-static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
- u32 max_voltage_steps,
- struct atom_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps)
- return;
-
- diff = voltage_table->count - max_voltage_steps;
-
-	for (i = 0; i < max_voltage_steps; i++)
- voltage_table->entries[i] = voltage_table->entries[i + diff];
-
- voltage_table->count = max_voltage_steps;
-}
-
-static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
- struct atom_voltage_table *voltage_table)
-{
- u32 i;
-
- if (voltage_dependency_table == NULL)
- return -EINVAL;
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = voltage_dependency_table->count;
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-static int si_construct_voltage_tables(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
-
- if (pi->voltage_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
-
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
- } else if (si_pi->voltage_control_svi2) {
- ret = si_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
- } else {
- return -EINVAL;
- }
-
- if (eg_pi->vddci_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
- if (ret)
- return ret;
-
- if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddci_voltage_table);
- }
- if (si_pi->vddci_control_svi2) {
- ret = si_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &eg_pi->vddci_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->mvdd_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
-
- if (ret) {
- pi->mvdd_control = false;
- return ret;
- }
-
- if (si_pi->mvdd_voltage_table.count == 0) {
- pi->mvdd_control = false;
- return -EINVAL;
- }
-
- if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(adev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &si_pi->mvdd_voltage_table);
- }
-
- if (si_pi->vddc_phase_shed_control) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
- if (ret)
- si_pi->vddc_phase_shed_control = false;
-
- if ((si_pi->vddc_phase_shed_table.count == 0) ||
- (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
- si_pi->vddc_phase_shed_control = false;
- }
-
- return 0;
-}
-
-static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
- const struct atom_voltage_table *voltage_table,
- SISLANDS_SMC_STATETABLE *table)
-{
- unsigned int i;
-
- for (i = 0; i < voltage_table->count; i++)
- table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
-}
-
-static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u8 i;
-
- if (si_pi->voltage_control_svi2) {
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
- si_pi->svc_gpio_id);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
- si_pi->svd_gpio_id);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
- 2);
- } else {
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
- }
- }
- }
-
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
-
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
-
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
-
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
-
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
-
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
- }
- }
- }
-
- return 0;
-}
-
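-/*
- * Encode a voltage for the SMC: pick the first table entry at or
- * above the requested value and store its index and big-endian value;
- * fail if the request exceeds the whole table.
- */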
-static int si_populate_voltage_value(struct amdgpu_device *adev,
- const struct atom_voltage_table *table,
- u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- unsigned int i;
-
- for (i = 0; i < table->count; i++) {
- if (value <= table->entries[i].value) {
- voltage->index = (u8)i;
- voltage->value = cpu_to_be16(table->entries[i].value);
- break;
- }
- }
-
- if (i >= table->count)
- return -EINVAL;
-
- return 0;
-}
-
-static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (pi->mvdd_control) {
- if (mclk <= pi->mvdd_split_frequency)
- voltage->index = 0;
- else
- voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
-
- voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
- }
- return 0;
-}
-
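-/*
- * Map a VDDC value to its standard (leakage-corrected) voltage via
- * the CAC leakage table: look for an exact match in the
- * vddc_dependency_on_sclk table first, then fall back to the first
- * entry at or above the value.
- */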
-static int si_get_std_voltage_value(struct amdgpu_device *adev,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage,
- u16 *std_voltage)
-{
- u16 v_index;
-	bool voltage_found = false;
-
-	*std_voltage = be16_to_cpu(voltage->value);
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
- return -EINVAL;
-
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (be16_to_cpu(voltage->value) ==
- (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
- else
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
- break;
- }
- }
-
- if (!voltage_found) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (be16_to_cpu(voltage->value) <=
- (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
- else
- *std_voltage =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
- break;
- }
- }
- }
- } else {
- if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
- }
- }
-
- return 0;
-}
-
-static int si_populate_std_voltage_value(struct amdgpu_device *adev,
- u16 value, u8 index,
- SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- voltage->index = index;
- voltage->value = cpu_to_be16(value);
-
- return 0;
-}
-
-static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u16 voltage, u32 sclk, u32 mclk,
- SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
-{
- unsigned int i;
-
- for (i = 0; i < limits->count; i++) {
- if ((voltage <= limits->entries[i].voltage) &&
- (sclk <= limits->entries[i].sclk) &&
- (mclk <= limits->entries[i].mclk))
- break;
- }
-
- smc_voltage->phase_settings = (u8)i;
-
- return 0;
-}
-
-static int si_init_arb_table_index(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- tmp &= 0x00FFFFFF;
- tmp |= MC_CG_ARB_FREQ_F1 << 24;
-
- return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
- tmp, si_pi->sram_end);
-}
-
-static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
-{
- return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int si_reset_to_default(struct amdgpu_device *adev)
-{
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
- &tmp, si_pi->sram_end);
- if (ret)
- return ret;
-
- tmp = (tmp >> 24) & 0xff;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
-}
-
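-/*
- * Compute the MC arbiter refresh rate for an engine clock from the
- * DRAM geometry: with the row count and refresh interval decoded from
- * MC_ARB_RAMCFG/MC_SEQ_MISC0, the rate is
- * (engine_clock * 10 * refresh_rate / rows - 32) / 64.
- */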
-static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
- u32 engine_clock)
-{
- u32 dram_rows;
- u32 dram_refresh_rate;
- u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
-
- if (tmp >= 4)
- dram_rows = 16384;
- else
- dram_rows = 1 << (tmp + 10);
-
- dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
- mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
-
- return mc_arb_rfsh_rate;
-}
-
-static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
-{
- u32 dram_timing;
- u32 dram_timing2;
- u32 burst_time;
-
- arb_regs->mc_arb_rfsh_rate =
- (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
-
- amdgpu_atombios_set_engine_dram_timings(adev,
- pl->sclk,
- pl->mclk);
-
- dram_timing = RREG32(MC_ARB_DRAM_TIMING);
- dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
- burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
-
- arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
- arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
- arb_regs->mc_arb_burst_time = (u8)burst_time;
-
- return 0;
-}
-
-static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- unsigned int first_arb_set)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
- int i, ret = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
- if (ret)
- break;
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->arb_table_start +
- offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
- (u8 *)&arb_regs,
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
- si_pi->sram_end);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
- SISLANDS_DRIVER_STATE_ARB_INDEX);
-}
-
-static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
- struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (pi->mvdd_control)
- return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
- si_pi->mvdd_bootup_value, voltage);
-
- return 0;
-}
-
-static int si_populate_smc_initial_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_initial_state,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 reg;
- int ret;
-
- table->initialState.levels[0].mclk.vDLL_CNTL =
- cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
- cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
- cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
- cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
- cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
- table->initialState.levels[0].mclk.mclk_value =
- cpu_to_be32(initial_state->performance_levels[0].mclk);
-
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
- cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
-
- table->initialState.levels[0].sclk.sclk_value =
- cpu_to_be32(initial_state->performance_levels[0].sclk);
-
- table->initialState.levels[0].arbRefreshState =
- SISLANDS_INITIAL_STATE_ARB_INDEX;
-
- table->initialState.levels[0].ACIndex = 0;
-
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
-
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->initialState.levels[0].vddc,
- &std_vddc);
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
- }
-
- if (eg_pi->vddci_control)
- si_populate_voltage_value(adev,
- &eg_pi->vddci_voltage_table,
- initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
-
- if (si_pi->vddc_phase_shed_control)
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- initial_state->performance_levels[0].vddc,
- initial_state->performance_levels[0].sclk,
- initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
-
- si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
-
- reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- table->initialState.levels[0].strobeMode =
- si_get_strobe_mode_settings(adev,
- initial_state->performance_levels[0].mclk);
-
- if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
- else
- table->initialState.levels[0].mcFlags = 0;
- }
-
- table->initialState.levelCount = 1;
-
- table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
-
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
- return 0;
-}
-
-static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
- SISLANDS_SMC_STATETABLE *table)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
- u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
- u32 dll_cntl = si_pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
- u32 reg;
- int ret;
-
- table->ACPIState = table->initialState;
-
- table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (pi->acpi_vddc) {
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
- }
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
-
- if (si_pi->vddc_phase_shed_control) {
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pi->acpi_vddc,
- 0,
- 0,
- &table->ACPIState.levels[0].vddc);
- }
- } else {
- ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
- if (!ret) {
- u16 std_vddc;
-
- ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
-
- if (!ret)
- si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
- }
- table->ACPIState.levels[0].gen2PCIE =
- (u8)amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
-
- if (si_pi->vddc_phase_shed_control)
- si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pi->min_vddc_in_table,
- 0,
- 0,
- &table->ACPIState.levels[0].vddc);
- }
-
- if (pi->acpi_vddc) {
- if (eg_pi->acpi_vddci)
- si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
- eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
- }
-
- mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
- mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
- dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
-
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
- cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
- cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
- cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
- cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
- cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
- cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
- cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
- cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
- cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
- cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
- cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
- cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
- cpu_to_be32(spll_func_cntl_4);
-
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
-
- si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
-
- if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
-
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
- return 0;
-}
-
-static int si_populate_ulv_state(struct amdgpu_device *adev,
- SISLANDS_SMC_SWSTATE *state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-	u32 sclk_in_sr = 1350; /* sclk while in self refresh; origin of this value unclear */
- int ret;
-
- ret = si_convert_power_level_to_smc(adev, &ulv->pl,
- &state->levels[0]);
- if (!ret) {
- if (eg_pi->sclk_deep_sleep) {
- if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
- else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
- }
- if (ulv->one_pcie_lane_in_ulv)
- state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
- state->levelCount = 1;
-
- state->flags |= PPSMC_SWSTATE_FLAG_DC;
- }
-
- return ret;
-}
-
-static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
- int ret;
-
- ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
- &arb_regs);
- if (ret)
- return ret;
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
- ulv->volt_change_delay);
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->arb_table_start +
- offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
- (u8 *)&arb_regs,
- sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
- si_pi->sram_end);
-
- return ret;
-}
-
-static void si_get_mvdd_configuration(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
- pi->mvdd_split_frequency = 30000;
-}
-
-static int si_init_smc_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
- const struct si_ulv_param *ulv = &si_pi->ulv;
- SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
- int ret;
- u32 lane_width;
- u32 vr_hot_gpio;
-
- si_populate_smc_voltage_tables(adev, table);
-
- switch (adev->pm.int_thermal_type) {
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
- break;
- case THERMAL_TYPE_NONE:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
- break;
- default:
- table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
- break;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
- table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
- if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
- table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
- table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
- table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
- table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
- vr_hot_gpio = adev->pm.dpm.backbias_response_time;
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
- vr_hot_gpio);
- }
-
- ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
- if (ret)
- return ret;
-
- ret = si_populate_smc_acpi_state(adev, table);
- if (ret)
- return ret;
-
- table->driverState = table->initialState;
-
- ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
- SISLANDS_INITIAL_STATE_ARB_INDEX);
- if (ret)
- return ret;
-
- if (ulv->supported && ulv->pl.vddc) {
- ret = si_populate_ulv_state(adev, &table->ULVState);
- if (ret)
- return ret;
-
- ret = si_program_ulv_memory_timing_parameters(adev);
- if (ret)
- return ret;
-
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
-
- lane_width = amdgpu_get_pcie_lanes(adev);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
- } else {
- table->ULVState = table->initialState;
- }
-
- return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
- (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
- si_pi->sram_end);
-}
-
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct atom_clock_dividers dividers;
- u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
- u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
- u64 tmp;
- u32 reference_clock = adev->clock.spll.reference_freq;
- u32 reference_divider;
- u32 fbdiv;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- engine_clock, false, &dividers);
- if (ret)
- return ret;
-
- reference_divider = 1 + dividers.ref_div;
-
- tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
- do_div(tmp, reference_clock);
- fbdiv = (u32) tmp;
-
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
-
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
-
- if (pi->sclk_ss) {
- struct amdgpu_atom_ss ss;
- u32 vco_freq = engine_clock * dividers.post_div;
-
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
- u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
- u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
-
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
- }
- }
-
- sclk->sclk_value = engine_clock;
- sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
- sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
- sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
- sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
- sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
- sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
-
- return 0;
-}
-
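[Editor's note] The 16384 in the divider math above is 2^14, i.e. the SPLL feedback divider is computed in 14-bit fixed point; a standalone sketch with invented example numbers:

/*
 * Illustrative sketch of the fbdiv computation above. Example (made-up
 * values): engine_clock = 80000 (10 kHz units, 800 MHz), ref_div = 1 so
 * reference_divider = 2, post_div = 2, reference clock 2700 (27 MHz):
 * fbdiv = 80000 * 2 * 2 * 16384 / 2700 = 1941807, i.e. ~118.5 in
 * 2^14 fixed point.
 */
static u32 example_spll_fbdiv(u32 engine_clock, u32 reference_divider,
			      u32 post_div, u32 reference_clock)
{
	u64 tmp = (u64)engine_clock * reference_divider * post_div * 16384ULL;

	do_div(tmp, reference_clock);
	return (u32)tmp;
}
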
-static int si_populate_sclk_value(struct amdgpu_device *adev,
- u32 engine_clock,
- SISLANDS_SMC_SCLK_VALUE *sclk)
-{
- SISLANDS_SMC_SCLK_VALUE sclk_tmp;
- int ret;
-
- ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
- if (!ret) {
- sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
- sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
- sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
- sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
- sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
- sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
- sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
- }
-
- return ret;
-}
-
-static int si_populate_mclk_value(struct amdgpu_device *adev,
- u32 engine_clock,
- u32 memory_clock,
- SISLANDS_SMC_MCLK_VALUE *mclk,
- bool strobe_mode,
- bool dll_state_on)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 dll_cntl = si_pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
- u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
- u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
- struct atom_mpll_param mpll_param;
- int ret;
-
- ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
- if (ret)
- return ret;
-
- mpll_func_cntl &= ~BWCTRL_MASK;
- mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
-
- mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
- mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
- CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
-
- mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
- mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
- mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
- YCLK_POST_DIV(mpll_param.post_div);
- }
-
- if (pi->mclk_ss) {
- struct amdgpu_atom_ss ss;
- u32 freq_nom;
- u32 tmp;
- u32 reference_clock = adev->clock.mpll.reference_freq;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- freq_nom = memory_clock * 4;
- else
- freq_nom = memory_clock * 2;
-
- tmp = freq_nom / reference_clock;
- tmp = tmp * tmp;
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
- u32 clks = reference_clock * 5 / ss.rate;
- u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 &= ~CLKV_MASK;
- mpll_ss1 |= CLKV(clkv);
-
- mpll_ss2 &= ~CLKS_MASK;
- mpll_ss2 |= CLKS(clks);
- }
- }
-
- mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
- mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
-
- if (dll_state_on)
- mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
- else
- mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
- mclk->mclk_value = cpu_to_be32(memory_clock);
- mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
- mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
- mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
- mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
- mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
- mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
- mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
- mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
- mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
-
- return 0;
-}
-
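[Editor's note] For the spread-spectrum sizing above, freq_nom is the nominal data rate rather than the command clock: GDDR5 transfers four data words per memory clock, other DRAM types two. Restated as a sketch:

/* Sketch of the nominal-rate selection feeding the memory SS math. */
static u32 example_freq_nom(u32 memory_clock, bool is_gddr5)
{
	return memory_clock * (is_gddr5 ? 4 : 2);
}
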
-static void si_populate_smc_sp(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct si_ps *ps = si_get_ps(amdgpu_state);
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- int i;
-
- for (i = 0; i < ps->performance_level_count - 1; i++)
- smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
-
- smc_state->levels[ps->performance_level_count - 1].bSP =
- cpu_to_be32(pi->psp);
-}
-
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- int ret;
- bool dll_state_on;
- u16 std_vddc;
- bool gmc_pg = false;
-
- if (eg_pi->pcie_performance_request &&
- (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
- level->gen2PCIE = (u8)si_pi->force_pcie_gen;
- else
- level->gen2PCIE = (u8)pl->pcie_gen;
-
- ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
- if (ret)
- return ret;
-
- level->mcFlags = 0;
-
- if (pi->mclk_stutter_mode_threshold &&
- (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
- !eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
- level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
-
- if (gmc_pg)
- level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
- }
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- if (pl->mclk > pi->mclk_edc_enable_threshold)
- level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
-
- if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
- level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
-
- level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
-
- if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
- if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
- ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
-				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
-			else
-				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
- } else {
- dll_state_on = false;
- }
- } else {
- level->strobeMode = si_get_strobe_mode_settings(adev,
- pl->mclk);
-
-		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
- }
-
- ret = si_populate_mclk_value(adev,
- pl->sclk,
- pl->mclk,
- &level->mclk,
- (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
- if (ret)
- return ret;
-
- ret = si_populate_voltage_value(adev,
- &eg_pi->vddc_voltage_table,
- pl->vddc, &level->vddc);
- if (ret)
- return ret;
-
- ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
- if (ret)
- return ret;
-
- ret = si_populate_std_voltage_value(adev, std_vddc,
- level->vddc.index, &level->std_vddc);
- if (ret)
- return ret;
-
- if (eg_pi->vddci_control) {
- ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
- pl->vddci, &level->vddci);
- if (ret)
- return ret;
- }
-
- if (si_pi->vddc_phase_shed_control) {
- ret = si_populate_phase_shedding_value(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- pl->vddc,
- pl->sclk,
- pl->mclk,
- &level->vddc);
- if (ret)
- return ret;
- }
-
- level->MaxPoweredUpCU = si_pi->max_cu;
-
- ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
-
- return ret;
-}
-
-static int si_populate_smc_t(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- u32 a_t;
- u32 t_l, t_h;
- u32 high_bsp;
- int i, ret;
-
- if (state->performance_level_count >= 9)
- return -EINVAL;
-
- if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
- smc_state->levels[0].aT = cpu_to_be32(a_t);
- return 0;
- }
-
- smc_state->levels[0].aT = cpu_to_be32(0);
-
- for (i = 0; i <= state->performance_level_count - 2; i++) {
- ret = r600_calculate_at(
- (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
- 100 * R600_AH_DFLT,
- state->performance_levels[i + 1].sclk,
- state->performance_levels[i].sclk,
- &t_l,
- &t_h);
-
- if (ret) {
- t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
- t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
- }
-
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
- smc_state->levels[i].aT = cpu_to_be32(a_t);
-
- high_bsp = (i == state->performance_level_count - 2) ?
- pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
- smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
- }
-
- return 0;
-}
-
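[Editor's note] Each aT word above carries two packed fields, with the CG_R half written on level i and the CG_L half on level i+1; a sketch of that pair of writes (endian conversion omitted, macro names as in the driver):

/* Sketch: fill the aT halves for one adjacent pair of levels. */
static void example_fill_at_pair(u32 *at_cur, u32 *at_next, u32 t_l, u32 t_h,
				 u32 bsp, u32 high_bsp)
{
	*at_cur = (*at_cur & ~CG_R_MASK) | CG_R(t_l * bsp / 20000);
	*at_next = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
}
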
-static int si_disable_ulv(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
-}
-
-static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- const struct si_power_info *si_pi = si_get_pi(adev);
- const struct si_ulv_param *ulv = &si_pi->ulv;
- const struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
-
- if (state->performance_levels[0].mclk != ulv->pl.mclk)
- return false;
-
- /* XXX validate against display requirements! */
-
- for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
- if (ulv->pl.vddc <
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
- return false;
- }
- }
-
- if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
- return false;
-
- return true;
-}
-
-static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- const struct si_power_info *si_pi = si_get_pi(adev);
- const struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported) {
- if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
- return 0;
-}
-
-static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SISLANDS_SMC_SWSTATE *smc_state)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct ni_power_info *ni_pi = ni_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i, ret;
- u32 threshold;
-	u32 sclk_in_sr = 1350; /* sclk while in self refresh; origin of this value unclear */
-
- if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
- return -EINVAL;
-
- threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
-
- if (amdgpu_state->vclk && amdgpu_state->dclk) {
- eg_pi->uvd_enabled = true;
- if (eg_pi->smu_uvd_hs)
- smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
- } else {
- eg_pi->uvd_enabled = false;
- }
-
- if (state->dc_compatible)
- smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
-
- smc_state->levelCount = 0;
- for (i = 0; i < state->performance_level_count; i++) {
- if (eg_pi->sclk_deep_sleep) {
- if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
- if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
- else
- smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
- }
- }
-
- ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
- &smc_state->levels[i]);
- smc_state->levels[i].arbRefreshState =
- (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
-
- if (ret)
- return ret;
-
- if (ni_pi->enable_power_containment)
- smc_state->levels[i].displayWatermark =
- (state->performance_levels[i].sclk < threshold) ?
- PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
- else
- smc_state->levels[i].displayWatermark = (i < 2) ?
- PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
-
- if (eg_pi->dynamic_ac_timing)
- smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
- else
- smc_state->levels[i].ACIndex = 0;
-
- smc_state->levelCount++;
- }
-
- si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_watermark_threshold,
- threshold / 512);
-
- si_populate_smc_sp(adev, amdgpu_state, smc_state);
-
- ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
- if (ret)
- ni_pi->enable_power_containment = false;
-
- ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
- if (ret)
- ni_pi->enable_sq_ramping = false;
-
- return si_populate_smc_t(adev, amdgpu_state, smc_state);
-}
-
-static int si_upload_sw_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *new_state = si_get_ps(amdgpu_new_state);
- int ret;
- u32 address = si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, driverState);
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
- ((new_state->performance_level_count - 1) *
- sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
-
- memset(smc_state, 0, state_size);
-
- ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
- if (ret)
- return ret;
-
- return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
- state_size, si_pi->sram_end);
-}
-
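[Editor's note] The state_size computation above is the usual "struct plus (n - 1) trailing elements" pattern, since SISLANDS_SMC_SWSTATE declares one inline level; the same idea in isolation:

/* Sketch of the variable-length swstate sizing used above. */
static size_t example_swstate_size(unsigned int performance_level_count)
{
	return sizeof(SISLANDS_SMC_SWSTATE) +
	       (performance_level_count - 1) *
	       sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL);
}
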
-static int si_upload_ulv_state(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- int ret = 0;
-
- if (ulv->supported && ulv->pl.vddc) {
- u32 address = si_pi->state_table_start +
- offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
-
- memset(smc_state, 0, state_size);
-
- ret = si_populate_ulv_state(adev, smc_state);
- if (!ret)
- ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
- state_size, si_pi->sram_end);
- }
-
- return ret;
-}
-
-static int si_upload_smc_data(struct amdgpu_device *adev)
-{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
-
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
- }
- }
-
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
-
- return 0;
-}
-
-static int si_set_mc_special_registers(struct amdgpu_device *adev,
- struct si_mc_reg_table *table)
-{
- u8 i, j, k;
- u32 temp_reg;
-
- for (i = 0, j = table->last; i < table->last; i++) {
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- switch (table->mc_reg_address[i].s1) {
- case MC_SEQ_MISC1:
- temp_reg = RREG32(MC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- j++;
-
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- temp_reg = RREG32(MC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
-
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- j++;
- }
- break;
- case MC_SEQ_RESERVE_M:
- temp_reg = RREG32(MC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
-			for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- j++;
- break;
- default:
- break;
- }
- }
-
- table->last = j;
-
- return 0;
-}
-
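[Editor's note] The MRS/EMRS expansion above repeatedly splices the live register's upper half with per-entry lower bits; the core bit operation, pulled out for clarity:

/*
 * Sketch: combine the high 16 bits of the current MC_PMG_CMD_* value
 * with the low 16 bits of a timing-table entry, as the loop above does.
 */
static u32 example_mc_splice(u32 live_reg, u32 entry_data)
{
	return (live_reg & 0xffff0000) | (entry_data & 0x0000ffff);
}
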
-static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
-{
-	bool result = true;
-
-	switch (in_reg) {
- case MC_SEQ_RAS_TIMING:
- *out_reg = MC_SEQ_RAS_TIMING_LP;
- break;
- case MC_SEQ_CAS_TIMING:
- *out_reg = MC_SEQ_CAS_TIMING_LP;
- break;
- case MC_SEQ_MISC_TIMING:
- *out_reg = MC_SEQ_MISC_TIMING_LP;
- break;
- case MC_SEQ_MISC_TIMING2:
- *out_reg = MC_SEQ_MISC_TIMING2_LP;
- break;
- case MC_SEQ_RD_CTL_D0:
- *out_reg = MC_SEQ_RD_CTL_D0_LP;
- break;
- case MC_SEQ_RD_CTL_D1:
- *out_reg = MC_SEQ_RD_CTL_D1_LP;
- break;
- case MC_SEQ_WR_CTL_D0:
- *out_reg = MC_SEQ_WR_CTL_D0_LP;
- break;
- case MC_SEQ_WR_CTL_D1:
- *out_reg = MC_SEQ_WR_CTL_D1_LP;
- break;
- case MC_PMG_CMD_EMRS:
- *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
- break;
- case MC_PMG_CMD_MRS:
- *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
- break;
- case MC_PMG_CMD_MRS1:
- *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
- break;
- case MC_SEQ_PMG_TIMING:
- *out_reg = MC_SEQ_PMG_TIMING_LP;
- break;
- case MC_PMG_CMD_MRS2:
- *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
- break;
- case MC_SEQ_WR_CTL_2:
- *out_reg = MC_SEQ_WR_CTL_2_LP;
- break;
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static void si_set_valid_flag(struct si_mc_reg_table *table)
-{
- u8 i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
- table->valid_flag |= 1 << i;
- break;
- }
- }
- }
-}
-
-static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
-{
- u32 i;
- u16 address;
-
- for (i = 0; i < table->last; i++)
- table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
- address : table->mc_reg_address[i].s1;
-}
-
-static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
- struct si_mc_reg_table *si_table)
-{
- u8 i, j;
-
- if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (table->num_entries > MAX_AC_TIMING_ENTRIES)
- return -EINVAL;
-
- for (i = 0; i < table->last; i++)
- si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- si_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- si_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- si_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
- si_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct atom_mc_reg_table *table;
- struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
- u8 module_index = rv770_get_memory_module_index(adev);
- int ret;
-
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
- WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
- WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
- WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
- WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
- WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
- WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
- WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
- WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
- WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
- WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
- WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
- WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
- WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
-
- ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
- if (ret)
- goto init_mc_done;
-
- ret = si_copy_vbios_mc_reg_table(table, si_table);
- if (ret)
- goto init_mc_done;
-
- si_set_s0_mc_reg_index(si_table);
-
- ret = si_set_mc_special_registers(adev, si_table);
- if (ret)
- goto init_mc_done;
-
- si_set_valid_flag(si_table);
-
-init_mc_done:
- kfree(table);
-
- return ret;
-}
-
-static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
- SMC_SIslands_MCRegisters *mc_reg_table)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 i, j;
-
- for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
- if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- break;
- mc_reg_table->address[i].s0 =
- cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
- mc_reg_table->last = (u8)i;
-}
-
-static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
- SMC_SIslands_MCRegisterSet *data,
- u32 num_entries, u32 valid_flag)
-{
- u32 i, j;
-
-	for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & (1 << j)) {
- data->value[i] = cpu_to_be32(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
- struct rv7xx_pl *pl,
- SMC_SIslands_MCRegisterSet *mc_reg_table_data)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 i = 0;
-
- for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
- if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
- break;
- }
-
- if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-}
-
-static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state,
- SMC_SIslands_MCRegisters *mc_reg_table)
-{
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
-
- for (i = 0; i < state->performance_level_count; i++) {
- si_convert_mc_reg_table_entry_to_smc(adev,
- &state->performance_levels[i],
- &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
- }
-}
-
-static int si_populate_mc_reg_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_boot_state)
-{
- struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
- SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
- memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
-
- si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
-
- si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
-
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
- si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-
- if (ulv->supported && ulv->pl.vddc != 0)
- si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
- else
- si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
- &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
- si_pi->mc_reg_table.last,
- si_pi->mc_reg_table.valid_flag);
-
- si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
-
- return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
- (u8 *)smc_mc_reg_table,
- sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
-}
-
-static int si_upload_mc_reg_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state)
-{
- struct si_ps *new_state = si_get_ps(amdgpu_new_state);
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 address = si_pi->mc_reg_table_start +
- offsetof(SMC_SIslands_MCRegisters,
- data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
- SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
- memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
- si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
-
- return amdgpu_si_copy_bytes_to_smc(adev, address,
- (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
- sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
- si_pi->sram_end);
-}
-
-static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
-{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
- else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
-}
-
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct si_ps *state = si_get_ps(amdgpu_state);
- int i;
- u16 pcie_speed, max_speed = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- pcie_speed = state->performance_levels[i].pcie_gen;
- if (max_speed < pcie_speed)
- max_speed = pcie_speed;
- }
- return max_speed;
-}
-
-static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
-{
- u32 speed_cntl;
-
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
-
- return (u16)speed_cntl;
-}
-
-static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
-
- if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
- current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
- else
- current_link_speed = si_pi->force_pcie_gen;
-
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- si_pi->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
-#if defined(CONFIG_ACPI)
- case AMDGPU_PCIE_GEN3:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
- break;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
- break;
- /* fall through */
- case AMDGPU_PCIE_GEN2:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
- break;
-#endif
- /* fall through */
- default:
- si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- si_pi->pspp_notify_required = true;
- }
-}
-
-static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- u8 request;
-
- if (si_pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
- request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
- request = PCIE_PERF_REQ_PECI_GEN2;
- else
- request = PCIE_PERF_REQ_PECI_GEN1;
-
- if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
- (si_get_current_pcie_speed(adev) > 0))
- return;
-
-#if defined(CONFIG_ACPI)
- amdgpu_acpi_pcie_performance_request(adev, request, false);
-#endif
- }
-}
-
-#if 0
-static int si_ds_request(struct amdgpu_device *adev,
- bool ds_status_on, u32 count_write)
-{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-
- if (eg_pi->sclk_deep_sleep) {
- if (ds_status_on)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
- PPSMC_Result_OK) ?
- 0 : -EINVAL;
- else
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
- PPSMC_Result_OK) ? 0 : -EINVAL;
- }
- return 0;
-}
-#endif
-
-static void si_set_max_cu_value(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
-
- if (adev->asic_type == CHIP_VERDE) {
- switch (adev->pdev->device) {
- case 0x6820:
- case 0x6825:
- case 0x6821:
- case 0x6823:
- case 0x6827:
- si_pi->max_cu = 10;
- break;
- case 0x682D:
- case 0x6824:
- case 0x682F:
- case 0x6826:
- si_pi->max_cu = 8;
- break;
- case 0x6828:
- case 0x6830:
- case 0x6831:
- case 0x6838:
- case 0x6839:
- case 0x683D:
- si_pi->max_cu = 10;
- break;
- case 0x683B:
- case 0x683F:
- case 0x6829:
- si_pi->max_cu = 8;
- break;
- default:
- si_pi->max_cu = 0;
- break;
- }
- } else {
- si_pi->max_cu = 0;
- }
-}
-
-static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
- int j;
- u16 leakage_voltage;
-
- if (table) {
- for (i = 0; i < table->count; i++) {
- switch (si_get_leakage_voltage_from_leakage_index(adev,
- table->entries[i].v,
- &leakage_voltage)) {
- case 0:
- table->entries[i].v = leakage_voltage;
- break;
- case -EAGAIN:
- return -EINVAL;
- case -EINVAL:
- default:
- break;
- }
- }
-
- for (j = (table->count - 2); j >= 0; j--) {
- table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
- table->entries[j].v : table->entries[j + 1].v;
- }
- }
- return 0;
-}
-
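[Editor's note] The descending loop above guarantees the patched voltage curve never decreases as clocks rise; a compact restatement of that pass:

/* Sketch: clamp each voltage to at most its successor, back to front. */
static void example_make_monotonic(u16 *v, int count)
{
	int j;

	for (j = count - 2; j >= 0; j--)
		v[j] = min(v[j], v[j + 1]);
}
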
-static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
-{
- int ret = 0;
-
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
- if (ret)
- DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
- if (ret)
- DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
- ret = si_patch_single_dependency_table_based_on_leakage(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
- if (ret)
- DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
- return ret;
-}
-
-static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- u32 lane_width;
- u32 new_lane_width =
- ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
- u32 current_lane_width =
- ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-
- if (new_lane_width != current_lane_width) {
- amdgpu_set_pcie_lanes(adev, new_lane_width);
- lane_width = amdgpu_get_pcie_lanes(adev);
- si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
- }
-}
-
-static void si_dpm_setup_asic(struct amdgpu_device *adev)
-{
- si_read_clock_registers(adev);
- si_enable_acpi_power_management(adev);
-}
-
-static int si_thermal_enable_alert(struct amdgpu_device *adev,
- bool enable)
-{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
-
- if (enable) {
- PPSMC_Result result;
-
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
- result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- return -EINVAL;
- }
- } else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
- }
-
- return 0;
-}
-
-static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
-
- return 0;
-}
-
-static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
- si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
- si_pi->t_min = tmp;
- si_pi->fan_ctrl_is_in_default_mode = false;
- }
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
-
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
- u32 duty100;
- u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- u16 fdo_min, slope1, slope2;
- u32 reference_clock, tmp;
- int ret;
- u64 tmp64;
-
- if (!si_pi->fan_table_start) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
- if (duty100 == 0) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
- do_div(tmp64, 10000);
- fdo_min = (u16)tmp64;
-
- t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
- t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
-
- pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
- pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
-
- slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
- fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
- fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
- fan_table.slope1 = cpu_to_be16(slope1);
- fan_table.slope2 = cpu_to_be16(slope2);
- fan_table.fdo_min = cpu_to_be16(fdo_min);
- fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
- fan_table.hys_up = cpu_to_be16(1);
- fan_table.hys_slope = cpu_to_be16(1);
- fan_table.temp_resp_lim = cpu_to_be16(5);
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
- reference_clock) / 1600);
- fan_table.fdo_max = cpu_to_be16((u16)duty100);
-
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
- fan_table.temp_src = (uint8_t)tmp;
-
- ret = amdgpu_si_copy_bytes_to_smc(adev,
- si_pi->fan_table_start,
- (u8 *)(&fan_table),
- sizeof(fan_table),
- si_pi->sram_end);
-
- if (ret) {
-		DRM_ERROR("Failed to load fan table to the SMC.\n");
- adev->pm.dpm.fan.ucode_fan_control = false;
- }
-
- return ret;
-}
-
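[Editor's note] The fan slopes above are computed in fixed point, with a +50 rounding term applied before the /100; a worked sketch with invented table values:

/*
 * Illustrative only: with duty100 = 255, pwm_diff = 30 and
 * t_diff = 2000, slope = (50 + 16 * 255 * 30 / 2000) / 100
 *                      = (50 + 61) / 100 = 1.
 */
static u16 example_fan_slope(u32 duty100, u32 pwm_diff, u32 t_diff)
{
	return (u16)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
}
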
-static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result ret;
-
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
- if (ret == PPSMC_Result_OK) {
- si_pi->fan_is_controlled_by_smc = true;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- PPSMC_Result ret;
-
- ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
-
- if (ret == PPSMC_Result_OK) {
- si_pi->fan_is_controlled_by_smc = false;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int si_dpm_get_fan_speed_percent(void *handle,
- u32 *speed)
-{
- u32 duty, duty100;
- u64 tmp64;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)duty * 100;
- do_div(tmp64, duty100);
- *speed = (u32)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
-
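[Editor's note] The percent readback above widens to 64 bits only so the multiply cannot overflow before do_div(); a minimal standalone equivalent (using a min_t() clamp in place of the explicit if):

/* Sketch of the duty -> percent conversion, clamped to 100 as above. */
static u32 example_fan_percent(u32 duty, u32 duty100)
{
	u64 tmp64 = (u64)duty * 100;

	do_div(tmp64, duty100);
	return min_t(u32, (u32)tmp64, 100);
}
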
-static int si_dpm_set_fan_speed_percent(void *handle,
- u32 speed)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
- u32 duty, duty100;
- u64 tmp64;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (si_pi->fan_is_controlled_by_smc)
- return -EINVAL;
-
- if (speed > 100)
- return -EINVAL;
-
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)speed * duty100;
- do_div(tmp64, 100);
- duty = (u32)tmp64;
-
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
-
- return 0;
-}
-
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (mode) {
- /* stop auto-manage */
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_fan_ctrl_stop_smc_fan_control(adev);
- si_fan_ctrl_set_static_mode(adev, mode);
- } else {
- /* restart auto-manage */
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_thermal_start_smc_fan_control(adev);
- else
- si_fan_ctrl_set_default_mode(adev);
- }
-}
-
-static u32 si_dpm_get_fan_control_mode(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (si_pi->fan_is_controlled_by_smc)
- return 0;
-
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- return (tmp >> FDO_PWM_MODE_SHIFT);
-}
-
-#if 0
-static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
- u32 *speed)
-{
- u32 tach_period;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
- if (tach_period == 0)
- return -ENOENT;
-
- *speed = 60 * xclk * 10000 / tach_period;
-
- return 0;
-}
-
-static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
- u32 speed)
-{
- u32 tach_period, tmp;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- if ((speed < adev->pm.fan_min_rpm) ||
- (speed > adev->pm.fan_max_rpm))
- return -EINVAL;
-
- if (adev->pm.dpm.fan.ucode_fan_control)
- si_fan_ctrl_stop_smc_fan_control(adev);
-
- tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
-
- si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
-
- return 0;
-}
-#endif
-
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
-{
- struct si_power_info *si_pi = si_get_pi(adev);
- u32 tmp;
-
- if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
- si_pi->fan_ctrl_is_in_default_mode = true;
- }
-}
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.fan.ucode_fan_control) {
- si_fan_ctrl_start_smc_fan_control(adev);
- si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
- }
-}
-
-static void si_thermal_initialize(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
-		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
- WREG32(CG_TACH_CTRL, tmp);
- }
-
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
-{
- int ret;
-
- si_thermal_initialize(adev);
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ret = si_halt_smc(adev);
- if (ret)
- return ret;
- ret = si_thermal_setup_fan_table(adev);
- if (ret)
- return ret;
- ret = si_resume_smc(adev);
- if (ret)
- return ret;
- si_thermal_start_smc_fan_control(adev);
- }
-
- return 0;
-}
-
-static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
-{
- if (!adev->pm.no_fan) {
- si_fan_ctrl_set_default_mode(adev);
- si_fan_ctrl_stop_smc_fan_control(adev);
- }
-}
-
-static int si_dpm_enable(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
- int ret;
-
- if (amdgpu_si_is_smc_running(adev))
- return -EINVAL;
- if (pi->voltage_control || si_pi->voltage_control_svi2)
- si_enable_voltage_control(adev, true);
- if (pi->mvdd_control)
- si_get_mvdd_configuration(adev);
- if (pi->voltage_control || si_pi->voltage_control_svi2) {
- ret = si_construct_voltage_tables(adev);
- if (ret) {
- DRM_ERROR("si_construct_voltage_tables failed\n");
- return ret;
- }
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_initialize_mc_reg_table(adev);
- if (ret)
- eg_pi->dynamic_ac_timing = false;
- }
- if (pi->dynamic_ss)
- si_enable_spread_spectrum(adev, true);
- if (pi->thermal_protection)
- si_enable_thermal_protection(adev, true);
- si_setup_bsp(adev);
- si_program_git(adev);
- si_program_tp(adev);
- si_program_tpp(adev);
- si_program_sstp(adev);
- si_enable_display_gap(adev);
- si_program_vc(adev);
- ret = si_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("si_upload_firmware failed\n");
- return ret;
- }
- ret = si_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("si_process_firmware_header failed\n");
- return ret;
- }
- ret = si_initial_switch_from_arb_f0_to_f1(adev);
- if (ret) {
- DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
- return ret;
- }
- ret = si_init_smc_table(adev);
- if (ret) {
- DRM_ERROR("si_init_smc_table failed\n");
- return ret;
- }
- ret = si_init_smc_spll_table(adev);
- if (ret) {
- DRM_ERROR("si_init_smc_spll_table failed\n");
- return ret;
- }
- ret = si_init_arb_table_index(adev);
- if (ret) {
- DRM_ERROR("si_init_arb_table_index failed\n");
- return ret;
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_populate_mc_reg_table(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = si_initialize_smc_cac_tables(adev);
- if (ret) {
- DRM_ERROR("si_initialize_smc_cac_tables failed\n");
- return ret;
- }
- ret = si_initialize_hardware_cac_manager(adev);
- if (ret) {
- DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
- return ret;
- }
- ret = si_initialize_smc_dte_tables(adev);
- if (ret) {
- DRM_ERROR("si_initialize_smc_dte_tables failed\n");
- return ret;
- }
- ret = si_populate_smc_tdp_limits(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_smc_tdp_limits failed\n");
- return ret;
- }
- ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
- if (ret) {
- DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
- return ret;
- }
- si_program_response_times(adev);
- si_program_ds_registers(adev);
- si_dpm_start_smc(adev);
- ret = si_notify_smc_display_change(adev, false);
- if (ret) {
- DRM_ERROR("si_notify_smc_display_change failed\n");
- return ret;
- }
- si_enable_sclk_control(adev, true);
- si_start_dpm(adev);
-
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
- si_thermal_start_thermal_controller(adev);
-
- return 0;
-}
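
si_dpm_enable() is a long ladder of call / check / DRM_ERROR / return steps. The same shape can be expressed table-driven; a hedged sketch of that alternative pattern (not how this driver is organized):

#include <stddef.h>
#include <stdio.h>

struct fake_dev { int dummy; };

static int step_a(struct fake_dev *d) { (void)d; return 0; }
static int step_b(struct fake_dev *d) { (void)d; return 0; }

/* Each entry pairs an init step with a name for the error log. */
static const struct {
	int (*fn)(struct fake_dev *);
	const char *name;
} init_steps[] = {
	{ step_a, "step_a" },
	{ step_b, "step_b" },
};

static int run_init(struct fake_dev *d)
{
	for (size_t i = 0; i < sizeof(init_steps) / sizeof(init_steps[0]); i++) {
		int ret = init_steps[i].fn(d);
		if (ret) {
			fprintf(stderr, "%s failed\n", init_steps[i].name);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	struct fake_dev d = { 0 };
	return run_init(&d);
}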
-
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = si_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void si_dpm_disable(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-
- if (!amdgpu_si_is_smc_running(adev))
- return;
- si_thermal_stop_thermal_controller(adev);
- si_disable_ulv(adev);
- si_clear_vc(adev);
- if (pi->thermal_protection)
- si_enable_thermal_protection(adev, false);
- si_enable_power_containment(adev, boot_ps, false);
- si_enable_smc_cac(adev, boot_ps, false);
- si_enable_spread_spectrum(adev, false);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
- si_stop_dpm(adev);
- si_reset_to_default(adev);
- si_dpm_stop_smc(adev);
- si_force_switch_to_arb_f0(adev);
-
- ni_update_current_ps(adev, boot_ps);
-}
-
-static int si_dpm_pre_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- ni_update_requested_ps(adev, new_ps);
- si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
-
- return 0;
-}
-
-static int si_power_control_set_level(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
- int ret;
-
- ret = si_restrict_performance_levels_before_switch(adev);
- if (ret)
- return ret;
- ret = si_halt_smc(adev);
- if (ret)
- return ret;
- ret = si_populate_smc_tdp_limits(adev, new_ps);
- if (ret)
- return ret;
- ret = si_populate_smc_tdp_limits_2(adev, new_ps);
- if (ret)
- return ret;
- ret = si_resume_smc(adev);
- if (ret)
- return ret;
- ret = si_set_sw_state(adev);
- if (ret)
- return ret;
- return 0;
-}
-
-static void si_set_vce_clock(struct amdgpu_device *adev,
- struct amdgpu_ps *new_rps,
- struct amdgpu_ps *old_rps)
-{
- if ((old_rps->evclk != new_rps->evclk) ||
- (old_rps->ecclk != new_rps->ecclk)) {
- /* Turn the clocks on when encoding, off otherwise */
- if (new_rps->evclk || new_rps->ecclk) {
- /* Placeholder for future VCE 1.0 porting to amdgpu:
- vce_v1_0_enable_mgcg(adev, false, false); */
- } else {
- /* Placeholder for future VCE 1.0 porting to amdgpu:
- vce_v1_0_enable_mgcg(adev, true, false);
- amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk); */
- }
- }
-}
-
-static int si_dpm_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
- struct amdgpu_ps *old_ps = &eg_pi->current_rps;
- int ret;
-
- ret = si_disable_ulv(adev);
- if (ret) {
- DRM_ERROR("si_disable_ulv failed\n");
- return ret;
- }
- ret = si_restrict_performance_levels_before_switch(adev);
- if (ret) {
- DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
- return ret;
- }
- if (eg_pi->pcie_performance_request)
- si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
- ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
- ret = si_enable_power_containment(adev, new_ps, false);
- if (ret) {
- DRM_ERROR("si_enable_power_containment failed\n");
- return ret;
- }
- ret = si_enable_smc_cac(adev, new_ps, false);
- if (ret) {
- DRM_ERROR("si_enable_smc_cac failed\n");
- return ret;
- }
- ret = si_halt_smc(adev);
- if (ret) {
- DRM_ERROR("si_halt_smc failed\n");
- return ret;
- }
- ret = si_upload_sw_state(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_upload_sw_state failed\n");
- return ret;
- }
- ret = si_upload_smc_data(adev);
- if (ret) {
- DRM_ERROR("si_upload_smc_data failed\n");
- return ret;
- }
- ret = si_upload_ulv_state(adev);
- if (ret) {
- DRM_ERROR("si_upload_ulv_state failed\n");
- return ret;
- }
- if (eg_pi->dynamic_ac_timing) {
- ret = si_upload_mc_reg_table(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_upload_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = si_program_memory_timing_parameters(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_program_memory_timing_parameters failed\n");
- return ret;
- }
- si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
-
- ret = si_resume_smc(adev);
- if (ret) {
- DRM_ERROR("si_resume_smc failed\n");
- return ret;
- }
- ret = si_set_sw_state(adev);
- if (ret) {
- DRM_ERROR("si_set_sw_state failed\n");
- return ret;
- }
- ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
- si_set_vce_clock(adev, new_ps, old_ps);
- if (eg_pi->pcie_performance_request)
- si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
- ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
- if (ret) {
- DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
- return ret;
- }
- ret = si_enable_smc_cac(adev, new_ps, true);
- if (ret) {
- DRM_ERROR("si_enable_smc_cac failed\n");
- return ret;
- }
- ret = si_enable_power_containment(adev, new_ps, true);
- if (ret) {
- DRM_ERROR("si_enable_power_containment failed\n");
- return ret;
- }
-
- ret = si_power_control_set_level(adev);
- if (ret) {
- DRM_ERROR("si_power_control_set_level failed\n");
- return ret;
- }
-
- return 0;
-}
-
-static void si_dpm_post_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
-
- ni_update_current_ps(adev, new_ps);
-}
-
-#if 0
-void si_dpm_reset_asic(struct amdgpu_device *adev)
-{
- si_restrict_performance_levels_before_switch(adev);
- si_disable_ulv(adev);
- si_set_boot_state(adev);
-}
-#endif
-
-static void si_dpm_display_configuration_changed(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_program_display_gap(adev);
-}
-
-static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RV770_DEFAULT_VCLK_FREQ;
- rps->dclk = RV770_DEFAULT_DCLK_FREQ;
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- adev->pm.dpm.boot_ps = rps;
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
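
The parser goes through le16_to_cpu()/le32_to_cpu() because ATOM tables are packed little-endian BIOS data. A portable user-space equivalent that is correct on any host endianness:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise little-endian decode, equivalent in spirit to le32_to_cpu()
 * on the packed ATOM tables above. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t raw[4] = { 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08x\n", get_le32(raw));	/* 0x12345678 */
	return 0;
}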
-
-static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct rv7xx_power_info *pi = rv770_get_pi(adev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ps *ps = si_get_ps(rps);
- u16 leakage_voltage;
- struct rv7xx_pl *pl = &ps->performance_levels[index];
- int ret;
-
- ps->performance_level_count = index + 1;
-
- pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
- pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
- pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
- pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
-
- pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
- pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
- pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
-
- /* patch up vddc if necessary */
- ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
- &leakage_voltage);
- if (ret == 0)
- pl->vddc = leakage_voltage;
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
- pi->acpi_vddc = pl->vddc;
- eg_pi->acpi_vddci = pl->vddci;
- si_pi->acpi_pcie_gen = pl->pcie_gen;
- }
-
- if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
- index == 0) {
- /* XXX disable for A0 tahiti */
- si_pi->ulv.supported = false;
- si_pi->ulv.pl = *pl;
- si_pi->ulv.one_pcie_lane_in_ulv = false;
- si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
- si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
- si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
- }
-
- if (pi->min_vddc_in_table > pl->vddc)
- pi->min_vddc_in_table = pl->vddc;
-
- if (pi->max_vddc_in_table < pl->vddc)
- pi->max_vddc_in_table = pl->vddc;
-
- /* patch up boot state */
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- u16 vddc, vddci, mvdd;
- amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
- pl->mclk = adev->clock.default_mclk;
- pl->sclk = adev->clock.default_sclk;
- pl->vddc = vddc;
- pl->vddci = vddci;
- si_pi->mvdd_bootup_value = mvdd;
- }
-
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
- ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
- }
-}
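
Each level's sclk/mclk arrives as a little-endian 16-bit low word plus an 8-bit high byte, i.e. a 24-bit value in 10 kHz units. The recombination as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the usEngineClockLow / ucEngineClockHigh handling above. */
static uint32_t combine_clock(uint16_t low, uint8_t high)
{
	return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
	/* hypothetical 925 MHz engine clock: 92500 in 10 kHz units */
	uint32_t clk = combine_clock(92500 & 0xffff, 92500 >> 16);

	printf("sclk = %u (10 kHz units)\n", clk);	/* 92500 */
	return 0;
}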
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static int si_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct si_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
- sizeof(struct amdgpu_ps),
- GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- while (i)
- kfree(adev->pm.dpm.ps[--i].ps_priv);
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- si_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk, mclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
- sclk |= clock_info->si.ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
- mclk |= clock_info->si.ucMemoryClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = mclk;
- }
-
- return 0;
-}
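
clockInfo entries are addressed by the BIOS-declared byte stride ucEntrySize rather than by C array indexing, since the entry layout varies with table revision. A minimal sketch of that stride walk:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return the start of entry `index` in a table whose entry size is only
 * known at runtime, as in si_parse_power_table() above. */
static const uint8_t *entry_at(const uint8_t *base, uint32_t entry_size,
			       uint32_t index)
{
	return base + (size_t)index * entry_size;
}

int main(void)
{
	uint8_t table[3 * 5];	/* 3 entries, hypothetical 5-byte entry size */

	memset(table, 0, sizeof(table));
	table[2 * 5] = 0xab;	/* first byte of entry 2 */
	printf("entry 2 starts with 0x%02x\n", *entry_at(table, 5, 2));
	return 0;
}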
-
-static int si_dpm_init(struct amdgpu_device *adev)
-{
- struct rv7xx_power_info *pi;
- struct evergreen_power_info *eg_pi;
- struct ni_power_info *ni_pi;
- struct si_power_info *si_pi;
- struct atom_clock_dividers dividers;
- int ret;
-
- si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
- if (si_pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = si_pi;
- ni_pi = &si_pi->ni;
- eg_pi = &ni_pi->eg;
- pi = &eg_pi->rv7xx;
-
- si_pi->sys_pcie_mask =
- adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
-
- si_set_max_cu_value(adev);
-
- rv770_get_max_vddc(adev);
- si_get_leakage_vddc(adev);
- si_patch_dependency_tables_based_on_leakage(adev);
-
- pi->acpi_vddc = 0;
- eg_pi->acpi_vddci = 0;
- pi->min_vddc_in_table = 0;
- pi->max_vddc_in_table = 0;
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret)
- return ret;
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret)
- return ret;
-
- ret = si_parse_power_table(adev);
- if (ret)
- return ret;
-
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kcalloc(4,
- sizeof(struct amdgpu_clock_voltage_dependency_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
-
- if (adev->pm.dpm.voltage_response_time == 0)
- adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
- if (adev->pm.dpm.backbias_response_time == 0)
- adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
-
- ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
- 0, false, &dividers);
- if (ret)
- pi->ref_div = dividers.ref_div + 1;
- else
- pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
-
- eg_pi->smu_uvd_hs = false;
-
- pi->mclk_strobe_mode_threshold = 40000;
- if (si_is_special_1gb_platform(adev))
- pi->mclk_stutter_mode_threshold = 0;
- else
- pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
- pi->mclk_edc_enable_threshold = 40000;
- eg_pi->mclk_edc_wr_enable_threshold = 40000;
-
- ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
-
- pi->voltage_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_GPIO_LUT);
- if (!pi->voltage_control) {
- si_pi->voltage_control_svi2 =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_SVID2);
- if (si_pi->voltage_control_svi2)
- amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
- }
-
- pi->mvdd_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT);
-
- eg_pi->vddci_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT);
- if (!eg_pi->vddci_control)
- si_pi->vddci_control_svi2 =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
- VOLTAGE_OBJ_SVID2);
-
- si_pi->vddc_phase_shed_control =
- amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
- VOLTAGE_OBJ_PHASE_LUT);
-
- rv770_get_engine_memory_ss(adev);
-
- pi->asi = RV770_ASI_DFLT;
- pi->pasi = CYPRESS_HASI_DFLT;
- pi->vrc = SISLANDS_VRC_DFLT;
-
- pi->gfx_clock_gating = true;
-
- eg_pi->sclk_deep_sleep = true;
- si_pi->sclk_deep_sleep_above_low = false;
-
- if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
- pi->thermal_protection = true;
- else
- pi->thermal_protection = false;
-
- eg_pi->dynamic_ac_timing = true;
-
- eg_pi->light_sleep = true;
-#if defined(CONFIG_ACPI)
- eg_pi->pcie_performance_request =
- amdgpu_acpi_is_pcie_performance_request_supported(adev);
-#else
- eg_pi->pcie_performance_request = false;
-#endif
-
- si_pi->sram_end = SMC_RAM_END;
-
- adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
- adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
- adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
- adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
- adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
-
- si_initialize_powertune_defaults(adev);
-
- /* make sure dc limits are valid */
- if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
- (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- si_pi->fan_ctrl_is_in_default_mode = true;
-
- return 0;
-}
-
-static void si_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->pm.dpm.ps)
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- kfree(adev->pm.dpm.ps[i].ps_priv);
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
- amdgpu_free_extended_power_table(adev);
-}
-
-static void si_dpm_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *rps = &eg_pi->current_rps;
- struct si_ps *ps = si_get_ps(rps);
- struct rv7xx_pl *pl;
- u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
-
- if (current_index >= ps->performance_level_count) {
- seq_printf(m, "invalid dpm profile %u\n", current_index);
- } else {
- pl = &ps->performance_levels[current_index];
- seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
- current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
- }
-}
-
-static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int si_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int si_dpm_late_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- ret = si_set_temperature_range(adev);
- if (ret)
- return ret;
-#if 0 /* TODO? */
- si_dpm_powergate_uvd(adev, true);
-#endif
- return 0;
-}
-
-/**
- * si_dpm_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int si_dpm_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
-
- DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_TAHITI:
- chip_name = "tahiti";
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811)))
- chip_name = "pitcairn_k";
- else
- chip_name = "pitcairn";
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b))))
- chip_name = "verde_k";
- else
- chip_name = "verde";
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610)))
- chip_name = "oland_k";
- else
- chip_name = "oland";
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))))
- chip_name = "hainan_k";
- else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665))
- chip_name = "banks_k_2";
- else
- chip_name = "hainan";
- break;
- default:
- BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("si_smc: Failed to load firmware. err = %d \"%s\"\n",
- err, fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
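
The chip_name selection encodes (device, revision) firmware quirks as nested if-chains. An equivalent table-driven sketch (the quirk entries below are illustrative, copied from the Pitcairn branch only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_quirk {
	uint16_t device;
	uint8_t revision;
	const char *name;
};

static const struct fw_quirk quirks[] = {
	{ 0x6810, 0x81, "pitcairn_k" },
	{ 0x6811, 0x81, "pitcairn_k" },
};

/* Return the quirked firmware name, or the default for the chip family. */
static const char *pick_fw(uint16_t device, uint8_t revision,
			   const char *fallback)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].device == device && quirks[i].revision == revision)
			return quirks[i].name;
	return fallback;
}

int main(void)
{
	printf("%s\n", pick_fw(0x6810, 0x81, "pitcairn"));	/* pitcairn_k */
	printf("%s\n", pick_fw(0x6818, 0x00, "pitcairn"));	/* pitcairn */
	return 0;
}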
-
-static int si_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- ret = si_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = si_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int si_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- return 0;
-}
-
-static int si_dpm_hw_init(void *handle)
-{
- int ret;
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm)
- return 0;
-
- mutex_lock(&adev->pm.mutex);
- si_dpm_setup_asic(adev);
- ret = si_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- return ret;
-}
-
-static int si_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- si_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
-
- return 0;
-}
-
-static int si_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- /* disable dpm */
- si_dpm_disable(adev);
- /* reset the power state */
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
- }
- return 0;
-}
-
-static int si_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- si_dpm_setup_asic(adev);
- ret = si_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool si_dpm_is_idle(void *handle)
-{
- /* XXX */
- return true;
-}
-
-static int si_dpm_wait_for_idle(void *handle)
-{
- /* XXX */
- return 0;
-}
-
-static int si_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int si_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int si_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-/* get temperature in millidegrees */
-static int si_dpm_get_temp(void *handle)
-{
- u32 temp;
- int actual_temp = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
-
- if (temp & 0x200)
- actual_temp = 255;
- else
- actual_temp = temp & 0x1ff;
-
- actual_temp *= 1000;
-
- return actual_temp;
-}
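
The decode treats the CTF field as a 9-bit Celsius value with bit 9 acting as a saturation flag, and reports millidegrees. As a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors si_dpm_get_temp() above: bit 9 set clamps the reading to 255 C,
 * otherwise the low 9 bits are degrees C; result is in millidegrees. */
static int decode_temp_mdeg(uint32_t field)
{
	int c = (field & 0x200) ? 255 : (int)(field & 0x1ff);

	return c * 1000;
}

int main(void)
{
	printf("%d\n", decode_temp_mdeg(0x04b));	/* 75000 */
	printf("%d\n", decode_temp_mdeg(0x200));	/* 255000 */
	return 0;
}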
-
-static u32 si_dpm_get_sclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].sclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
-}
-
-static u32 si_dpm_get_mclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].mclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
-}
-
-static void si_dpm_print_power_state(void *handle,
- void *current_ps)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
- struct si_ps *ps = si_get_ps(rps);
- struct rv7xx_pl *pl;
- int i;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->performance_level_count; i++) {
- pl = &ps->performance_levels[i];
- if (adev->asic_type >= CHIP_TAHITI)
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
- i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
- else
- DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
- i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static int si_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->powerplay.pp_funcs = &si_dpm_funcs;
- adev->powerplay.pp_handle = adev;
- si_dpm_set_irq_funcs(adev);
- return 0;
-}
-
-static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
- const struct rv7xx_pl *si_cpl2)
-{
- return ((si_cpl1->mclk == si_cpl2->mclk) &&
- (si_cpl1->sclk == si_cpl2->sclk) &&
- (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
- (si_cpl1->vddc == si_cpl2->vddc) &&
- (si_cpl1->vddci == si_cpl2->vddci));
-}
-
-static int si_check_state_equal(void *handle,
- void *current_ps,
- void *request_ps,
- bool *equal)
-{
- struct si_ps *si_cps;
- struct si_ps *si_rps;
- int i;
- struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- si_cps = si_get_ps(cps);
- si_rps = si_get_ps(rps);
-
- if (si_cps == NULL) {
- pr_info("si_cps is NULL\n");
- *equal = false;
- return 0;
- }
-
- if (si_cps->performance_level_count != si_rps->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < si_cps->performance_level_count; i++) {
- if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
- &(si_rps->performance_levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
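
The comparison runs cheapest-first: level counts, then each level field-wise, then the UVD/VCE clocks as a tie-breaker. The same shape in a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct level { uint32_t sclk, mclk; };	/* trimmed stand-in for rv7xx_pl */

/* Counts first, then element-wise levels, then auxiliary clocks. */
static bool states_equal(const struct level *a, size_t na, uint32_t vclk_a,
			 const struct level *b, size_t nb, uint32_t vclk_b)
{
	if (na != nb)
		return false;
	for (size_t i = 0; i < na; i++)
		if (a[i].sclk != b[i].sclk || a[i].mclk != b[i].mclk)
			return false;
	return vclk_a == vclk_b;
}

int main(void)
{
	struct level x[] = { { 300, 150 }, { 900, 1250 } };
	struct level y[] = { { 300, 150 }, { 900, 1250 } };

	printf("%d\n", states_equal(x, 2, 54000, y, 2, 54000));	/* 1 */
	return 0;
}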
-
-static int si_dpm_read_sensor(void *handle, int idx,
- void *value, int *size)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
- struct amdgpu_ps *rps = &eg_pi->current_rps;
- struct si_ps *ps = si_get_ps(rps);
- uint32_t sclk, mclk;
- u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- if (pl_index < ps->performance_level_count) {
- sclk = ps->performance_levels[pl_index].sclk;
- *((uint32_t *)value) = sclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- if (pl_index < ps->performance_level_count) {
- mclk = ps->performance_levels[pl_index].mclk;
- *((uint32_t *)value) = mclk;
- *size = 4;
- return 0;
- }
- return -EINVAL;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = si_dpm_get_temp(adev);
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static const struct amd_ip_funcs si_dpm_ip_funcs = {
- .name = "si_dpm",
- .early_init = si_dpm_early_init,
- .late_init = si_dpm_late_init,
- .sw_init = si_dpm_sw_init,
- .sw_fini = si_dpm_sw_fini,
- .hw_init = si_dpm_hw_init,
- .hw_fini = si_dpm_hw_fini,
- .suspend = si_dpm_suspend,
- .resume = si_dpm_resume,
- .is_idle = si_dpm_is_idle,
- .wait_for_idle = si_dpm_wait_for_idle,
- .soft_reset = si_dpm_soft_reset,
- .set_clockgating_state = si_dpm_set_clockgating_state,
- .set_powergating_state = si_dpm_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version si_smu_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 6,
- .minor = 0,
- .rev = 0,
- .funcs = &si_dpm_ip_funcs,
-};
-
-static const struct amd_pm_funcs si_dpm_funcs = {
- .pre_set_power_state = &si_dpm_pre_set_power_state,
- .set_power_state = &si_dpm_set_power_state,
- .post_set_power_state = &si_dpm_post_set_power_state,
- .display_configuration_changed = &si_dpm_display_configuration_changed,
- .get_sclk = &si_dpm_get_sclk,
- .get_mclk = &si_dpm_get_mclk,
- .print_power_state = &si_dpm_print_power_state,
- .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &si_dpm_force_performance_level,
- .vblank_too_short = &si_dpm_vblank_too_short,
- .set_fan_control_mode = &si_dpm_set_fan_control_mode,
- .get_fan_control_mode = &si_dpm_get_fan_control_mode,
- .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
- .check_state_equal = &si_check_state_equal,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .read_sensor = &si_dpm_read_sensor,
-};
-
-static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
- .set = si_dpm_set_interrupt_state,
- .process = si_dpm_process_interrupt,
-};
-
-static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
-}
-
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
deleted file mode 100644
index bc0be6818e21..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ /dev/null
@@ -1,1015 +0,0 @@
-/*
- * Copyright 2012 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __SI_DPM_H__
-#define __SI_DPM_H__
-
-#include "amdgpu_atombios.h"
-#include "sislands_smc.h"
-
-#define MC_CG_CONFIG 0x96f
-#define MC_ARB_CG 0x9fa
-#define CG_ARB_REQ(x) ((x) << 0)
-#define CG_ARB_REQ_MASK (0xff << 0)
-
-#define MC_ARB_DRAM_TIMING_1 0x9fc
-#define MC_ARB_DRAM_TIMING_2 0x9fd
-#define MC_ARB_DRAM_TIMING_3 0x9fe
-#define MC_ARB_DRAM_TIMING2_1 0x9ff
-#define MC_ARB_DRAM_TIMING2_2 0xa00
-#define MC_ARB_DRAM_TIMING2_3 0xa01
-
-#define MAX_NO_OF_MVDD_VALUES 2
-#define MAX_NO_VREG_STEPS 32
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-#define RV770_ASI_DFLT 1000
-#define CYPRESS_HASI_DFLT 400000
-#define PCIE_PERF_REQ_PECI_GEN1 2
-#define PCIE_PERF_REQ_PECI_GEN2 3
-#define PCIE_PERF_REQ_PECI_GEN3 4
-#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 kHz */
-#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 kHz */
-
-#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-
-#define RV770_SMC_TABLE_ADDRESS 0xB000
-#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-
-#define SMC_STROBE_RATIO 0x0F
-#define SMC_STROBE_ENABLE 0x10
-
-#define SMC_MC_EDC_RD_FLAG 0x01
-#define SMC_MC_EDC_WR_FLAG 0x02
-#define SMC_MC_RTT_ENABLE 0x04
-#define SMC_MC_STUTTER_EN 0x08
-
-#define RV770_SMC_VOLTAGEMASK_VDDC 0
-#define RV770_SMC_VOLTAGEMASK_MVDD 1
-#define RV770_SMC_VOLTAGEMASK_VDDCI 2
-#define RV770_SMC_VOLTAGEMASK_MAX 4
-
-#define NISLANDS_SMC_STROBE_RATIO 0x0F
-#define NISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define NISLANDS_SMC_MC_STUTTER_EN 0x08
-
-
-#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-
-#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
-#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
-#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
-#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
-
-#define SISLANDS_LEAKAGE_INDEX0 0xff01
-#define SISLANDS_MAX_LEAKAGE_COUNT 4
-
-#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
-#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
-#define SISLANDS_ACPI_STATE_ARB_INDEX 1
-#define SISLANDS_ULV_STATE_ARB_INDEX 2
-#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
-
-#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
-
-#define SISLANDS_DPM2_NEAR_TDP_DEC 10
-#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
-#define SISLANDS_DPM2_BELOW_SAFE_INC 20
-
-#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
-
-#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
-#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
-
-#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
-#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
-#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
-#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
-#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
-
-#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
-
-#define SISLANDS_VRC_DFLT 0xC000B3
-#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
-#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
-#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
-
-#define SI_ASI_DFLT 10000
-#define SI_BSP_DFLT 0x41EB
-#define SI_BSU_DFLT 0x2
-#define SI_AH_DFLT 5
-#define SI_RLP_DFLT 25
-#define SI_RMP_DFLT 65
-#define SI_LHP_DFLT 40
-#define SI_LMP_DFLT 15
-#define SI_TD_DFLT 0
-#define SI_UTC_DFLT_00 0x24
-#define SI_UTC_DFLT_01 0x22
-#define SI_UTC_DFLT_02 0x22
-#define SI_UTC_DFLT_03 0x22
-#define SI_UTC_DFLT_04 0x22
-#define SI_UTC_DFLT_05 0x22
-#define SI_UTC_DFLT_06 0x22
-#define SI_UTC_DFLT_07 0x22
-#define SI_UTC_DFLT_08 0x22
-#define SI_UTC_DFLT_09 0x22
-#define SI_UTC_DFLT_10 0x22
-#define SI_UTC_DFLT_11 0x22
-#define SI_UTC_DFLT_12 0x22
-#define SI_UTC_DFLT_13 0x22
-#define SI_UTC_DFLT_14 0x22
-#define SI_DTC_DFLT_00 0x24
-#define SI_DTC_DFLT_01 0x22
-#define SI_DTC_DFLT_02 0x22
-#define SI_DTC_DFLT_03 0x22
-#define SI_DTC_DFLT_04 0x22
-#define SI_DTC_DFLT_05 0x22
-#define SI_DTC_DFLT_06 0x22
-#define SI_DTC_DFLT_07 0x22
-#define SI_DTC_DFLT_08 0x22
-#define SI_DTC_DFLT_09 0x22
-#define SI_DTC_DFLT_10 0x22
-#define SI_DTC_DFLT_11 0x22
-#define SI_DTC_DFLT_12 0x22
-#define SI_DTC_DFLT_13 0x22
-#define SI_DTC_DFLT_14 0x22
-#define SI_VRC_DFLT 0x0000C003
-#define SI_VOLTAGERESPONSETIME_DFLT 1000
-#define SI_BACKBIASRESPONSETIME_DFLT 1000
-#define SI_VRU_DFLT 0x3
-#define SI_SPLLSTEPTIME_DFLT 0x1000
-#define SI_SPLLSTEPUNIT_DFLT 0x3
-#define SI_TPU_DFLT 0
-#define SI_TPC_DFLT 0x200
-#define SI_SSTU_DFLT 0
-#define SI_SST_DFLT 0x00C8
-#define SI_GICST_DFLT 0x200
-#define SI_FCT_DFLT 0x0400
-#define SI_FCTU_DFLT 0
-#define SI_CTXCGTT3DRPHC_DFLT 0x20
-#define SI_CTXCGTT3DRSDC_DFLT 0x40
-#define SI_VDDC3DOORPHC_DFLT 0x100
-#define SI_VDDC3DOORSDC_DFLT 0x7
-#define SI_VDDC3DOORSU_DFLT 0
-#define SI_MPLLLOCKTIME_DFLT 100
-#define SI_MPLLRESETTIME_DFLT 150
-#define SI_VCOSTEPPCT_DFLT 20
-#define SI_ENDINGVCOSTEPPCT_DFLT 5
-#define SI_REFERENCEDIVIDER_DFLT 4
-
-#define SI_PM_NUMBER_OF_TC 15
-#define SI_PM_NUMBER_OF_SCLKS 20
-#define SI_PM_NUMBER_OF_MCLKS 4
-#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
-#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
-
-/* XXX are these ok? */
-#define SI_TEMP_RANGE_MIN (90 * 1000)
-#define SI_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum ni_dc_cac_level
-{
- NISLANDS_DCCAC_LEVEL_0 = 0,
- NISLANDS_DCCAC_LEVEL_1,
- NISLANDS_DCCAC_LEVEL_2,
- NISLANDS_DCCAC_LEVEL_3,
- NISLANDS_DCCAC_LEVEL_4,
- NISLANDS_DCCAC_LEVEL_5,
- NISLANDS_DCCAC_LEVEL_6,
- NISLANDS_DCCAC_LEVEL_7,
- NISLANDS_DCCAC_MAX_LEVELS
-};
-
-enum si_cac_config_reg_type
-{
- SISLANDS_CACCONFIG_MMR = 0,
- SISLANDS_CACCONFIG_CGIND,
- SISLANDS_CACCONFIG_MAX
-};
-
-enum si_power_level {
- SI_POWER_LEVEL_LOW = 0,
- SI_POWER_LEVEL_MEDIUM = 1,
- SI_POWER_LEVEL_HIGH = 2,
- SI_POWER_LEVEL_CTXSW = 3,
-};
-
-enum si_td {
- SI_TD_AUTO,
- SI_TD_UP,
- SI_TD_DOWN,
-};
-
-enum si_display_watermark {
- SI_DISPLAY_WATERMARK_LOW = 0,
- SI_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum si_display_gap
-{
- SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- SI_PM_DISPLAY_GAP_VBLANK = 1,
- SI_PM_DISPLAY_GAP_WATERMARK = 2,
- SI_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
-extern const struct amdgpu_ip_block_version si_smu_ip_block;
-
-struct ni_leakage_coeffients
-{
- u32 at;
- u32 bt;
- u32 av;
- u32 bv;
- s32 t_slope;
- s32 t_intercept;
- u32 t_ref;
-};
-
-struct SMC_Evergreen_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-
-struct evergreen_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct evergreen_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_Evergreen_MCRegisterSet
-{
- uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-
-struct SMC_Evergreen_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
- SMC_Evergreen_MCRegisterSet data[5];
-};
-
-typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
-
-struct SMC_NIslands_MCRegisterSet
-{
- uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-
-struct ni_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_NIslands_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-
-struct SMC_NIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-
-struct evergreen_ulv_param {
- bool supported;
- struct rv7xx_pl *pl;
-};
-
-struct evergreen_arb_registers {
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 mc_arb_rfsh_rate;
- u32 mc_arb_burst_time;
-};
-
-struct at {
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
-};
-
-struct ni_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct RV770_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-
-struct RV770_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-
-struct RV730_SMC_MCLK_VALUE
-{
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL2;
- uint32_t vMPLL_FUNC_CNTL3;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-
-struct RV770_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-
-union RV7XX_SMC_MCLK_VALUE
-{
- RV770_SMC_MCLK_VALUE mclk770;
- RV730_SMC_MCLK_VALUE mclk730;
-};
-
-typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- union{
- uint8_t seqValue;
- uint8_t ACIndex;
- };
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t gen2XSP;
- uint8_t backbias;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- RV770_SMC_SCLK_VALUE sclk;
- RV7XX_SMC_MCLK_VALUE mclk;
- RV770_SMC_VOLTAGE_VALUE vddc;
- RV770_SMC_VOLTAGE_VALUE mvdd;
- RV770_SMC_VOLTAGE_VALUE vddci;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t stateFlags;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-
-struct RV770_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t padding1;
- uint8_t padding2;
- uint8_t padding3;
- RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
-
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
-
-struct RV770_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[MAX_NO_VREG_STEPS];
- RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- RV770_SMC_SWSTATE initialState;
- RV770_SMC_SWSTATE ACPIState;
- RV770_SMC_SWSTATE driverState;
- RV770_SMC_SWSTATE ULVState;
-};
-
-typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
-
-struct vddc_table_entry {
- u16 vddc;
- u8 vddc_index;
- u8 high_smio;
- u32 low_smio;
-};
-
-struct rv770_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct rv730_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl2;
- u32 mpll_func_cntl3;
- u32 mpll_ss;
- u32 mpll_ss2;
-};
-
-union r7xx_clock_registers {
- struct rv770_clock_registers rv770;
- struct rv730_clock_registers rv730;
-};
-
-struct rv7xx_power_info {
- /* flags */
- bool mem_gddr5;
- bool pcie_gen2;
- bool dynamic_pcie_gen2;
- bool acpi_pcie_gen2;
- bool boot_in_gen2;
- bool voltage_control; /* vddc */
- bool mvdd_control;
- bool sclk_ss;
- bool mclk_ss;
- bool dynamic_ss;
- bool gfx_clock_gating;
- bool mg_clock_gating;
- bool mgcgtssm;
- bool power_gating;
- bool thermal_protection;
- bool display_gap;
- bool dcodt;
- bool ulps;
- /* registers */
- union r7xx_clock_registers clk_regs;
- u32 s0_vid_lower_smio_cntl;
- /* voltage */
- u32 vddc_mask_low;
- u32 mvdd_mask_low;
- u32 mvdd_split_frequency;
- u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
- u16 max_vddc;
- u16 max_vddc_in_table;
- u16 min_vddc_in_table;
- struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
- u8 valid_vddc_entries;
- /* dc odt */
- u32 mclk_odt_threshold;
- u8 odt_value_0[2];
- u8 odt_value_1[2];
- /* stored values */
- u32 boot_sclk;
- u16 acpi_vddc;
- u32 ref_div;
- u32 active_auto_throttle_sources;
- u32 mclk_stutter_mode_threshold;
- u32 mclk_strobe_mode_threshold;
- u32 mclk_edc_enable_threshold;
- u32 bsp;
- u32 bsu;
- u32 pbsp;
- u32 pbsu;
- u32 dsp;
- u32 psp;
- u32 asi;
- u32 pasi;
- u32 vrc;
- u32 restricted_levels;
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
- /* smc offsets */
- u16 state_table_start;
- u16 soft_regs_start;
- u16 sram_end;
- /* scratch structs */
- RV770_SMC_STATETABLE smc_statetable;
-};
-
-struct rv7xx_pl {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci; /* eg+ only */
- u32 flags;
- enum amdgpu_pcie_gen pcie_gen; /* si+ only */
-};
-
-struct rv7xx_ps {
- struct rv7xx_pl high;
- struct rv7xx_pl medium;
- struct rv7xx_pl low;
- bool dc_compatible;
-};
-
-struct si_ps {
- u16 performance_level_count;
- bool dc_compatible;
- struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-struct ni_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ni_cac_data
-{
- struct ni_leakage_coeffients leakage_coefficients;
- u32 i_leakage;
- s32 leakage_minimum_temperature;
- u32 pwr_const;
- u32 dc_cac_value;
- u32 bif_cac_value;
- u32 lkge_pwr;
- u8 mc_wr_weight;
- u8 mc_rd_weight;
- u8 allow_ovrflw;
- u8 num_win_tdp;
- u8 l2num_win_tdp;
- u8 lts_truncate_n;
-};
-
-struct evergreen_power_info {
- /* must be first! */
- struct rv7xx_power_info rv7xx;
- /* flags */
- bool vddci_control;
- bool dynamic_ac_timing;
- bool abm;
- bool mcls;
- bool light_sleep;
- bool memory_transition;
- bool pcie_performance_request;
- bool pcie_performance_request_registered;
- bool sclk_deep_sleep;
- bool dll_default_on;
- bool ls_clock_gating;
- bool smu_uvd_hs;
- bool uvd_enabled;
- /* stored values */
- u16 acpi_vddci;
- u8 mvdd_high_index;
- u8 mvdd_low_index;
- u32 mclk_edc_wr_enable_threshold;
- struct evergreen_mc_reg_table mc_reg_table;
- struct atom_voltage_table vddc_voltage_table;
- struct atom_voltage_table vddci_voltage_table;
- struct evergreen_arb_registers bootup_arb_registers;
- struct evergreen_ulv_param ulv;
- struct at ats[2];
- /* smc offsets */
- u16 mc_reg_table_start;
- struct amdgpu_ps current_rps;
- struct rv7xx_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct rv7xx_ps requested_ps;
-};
-
-struct PP_NIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSST;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint8_t Reserved[6];
-};
-
-typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-
-struct PP_NIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
-};
-typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-
-struct NISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-
-struct NISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- NISLANDS_SMC_SCLK_VALUE sclk;
- NISLANDS_SMC_MCLK_VALUE mclk;
- NISLANDS_SMC_VOLTAGE_VALUE vddc;
- NISLANDS_SMC_VOLTAGE_VALUE mvdd;
- NISLANDS_SMC_VOLTAGE_VALUE vddci;
- NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint32_t powergate_en;
- uint8_t hUp;
- uint8_t hDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t reserved[2];
- PP_NIslands_Dpm2PerfLevel dpm2;
-};
-
-typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct NISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[];
-};
-
-typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
-
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define NISLANDS_MAX_NO_VREG_STEPS 32
-
-struct NISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
-};
-
-typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
-
-struct ni_power_info {
- /* must be first! */
- struct evergreen_power_info eg;
- struct ni_clock_registers clock_registers;
- struct ni_mc_reg_table mc_reg_table;
- u32 mclk_rtt_mode_threshold;
- /* flags */
- bool use_power_boost_limit;
- bool support_cac_long_term_average;
- bool cac_enabled;
- bool cac_configuration_required;
- bool driver_calculate_cac_leakage;
- bool pc_enabled;
- bool enable_power_containment;
- bool enable_cac;
- bool enable_sq_ramping;
- /* smc offsets */
- u16 arb_table_start;
- u16 fan_table_start;
- u16 cac_table_start;
- u16 spll_table_start;
- /* CAC stuff */
- struct ni_cac_data cac_data;
- u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
- const struct ni_cac_weights *cac_weights;
- u8 lta_window_size;
- u8 lts_truncate;
- struct si_ps current_ps;
- struct si_ps requested_ps;
- /* scratch structs */
- SMC_NIslands_MCRegisters smc_mc_reg_table;
- NISLANDS_SMC_STATETABLE smc_statetable;
-};
-
-struct si_cac_config_reg
-{
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum si_cac_config_reg_type type;
-};
-
-struct si_powertune_data
-{
- u32 cac_window;
- u32 l2_lta_window_size_default;
- u8 lts_truncate_default;
- u8 shift_n_default;
- u8 operating_temp;
- struct ni_leakage_coeffients leakage_coefficients;
- u32 fixed_kt;
- u32 lkge_lut_v0_percent;
- u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
- bool enable_powertune_by_default;
-};
-
-struct si_dyn_powertune_data
-{
- u32 cac_leakage;
- s32 leakage_minimum_temperature;
- u32 wintime;
- u32 l2_lta_window_size;
- u8 lts_truncate;
- u8 shift_n;
- u8 dc_pwr_value;
- bool disable_uvd_powertune;
-};
-
-struct si_dte_data
-{
- u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- u32 k;
- u32 t0;
- u32 max_t;
- u8 window_size;
- u8 temp_select;
- u8 dte_mode;
- u8 tdep_count;
- u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- u32 t_threshold;
- bool enable_dte_by_default;
-};
-
-struct si_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 dll_cntl;
- u32 mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl_1;
- u32 mpll_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct si_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct si_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct si_leakage_voltage_entry
-{
- u16 voltage;
- u16 leakage_index;
-};
-
-struct si_leakage_voltage
-{
- u16 count;
- struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
-};
-
-
-struct si_ulv_param {
- bool supported;
- u32 cg_ulv_control;
- u32 cg_ulv_parameter;
- u32 volt_change_delay;
- struct rv7xx_pl pl;
- bool one_pcie_lane_in_ulv;
-};
-
-struct si_power_info {
- /* must be first! */
- struct ni_power_info ni;
- struct si_clock_registers clock_registers;
- struct si_mc_reg_table mc_reg_table;
- struct atom_voltage_table mvdd_voltage_table;
- struct atom_voltage_table vddc_phase_shed_table;
- struct si_leakage_voltage leakage_voltage;
- u16 mvdd_bootup_value;
- struct si_ulv_param ulv;
- u32 max_cu;
- /* pcie gen */
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen boot_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
- u32 sys_pcie_mask;
- /* flags */
- bool enable_dte;
- bool enable_ppm;
- bool vddc_phase_shed_control;
- bool pspp_notify_required;
- bool sclk_deep_sleep_above_low;
- bool voltage_control_svi2;
- bool vddci_control_svi2;
- /* smc offsets */
- u32 sram_end;
- u32 state_table_start;
- u32 soft_regs_start;
- u32 mc_reg_table_start;
- u32 arb_table_start;
- u32 cac_table_start;
- u32 dte_table_start;
- u32 spll_table_start;
- u32 papm_cfg_table_start;
- u32 fan_table_start;
- /* CAC stuff */
- const struct si_cac_config_reg *cac_weights;
- const struct si_cac_config_reg *lcac_config;
- const struct si_cac_config_reg *cac_override;
- const struct si_powertune_data *powertune_data;
- struct si_dyn_powertune_data dyn_powertune_data;
- /* DTE stuff */
- struct si_dte_data dte_data;
- /* scratch structs */
- SMC_SIslands_MCRegisters smc_mc_reg_table;
- SISLANDS_SMC_STATETABLE smc_statetable;
- PP_SIslands_PAPMParameters papm_parm;
- /* SVI2 */
- u8 svd_gpio_id;
- u8 svc_gpio_id;
- /* fan control */
- bool fan_ctrl_is_in_default_mode;
- u32 t_min;
- u32 fan_ctrl_default_mode;
- bool fan_is_controlled_by_smc;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
deleted file mode 100644
index 8f994ffa9cd1..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include <linux/firmware.h>
-
-#include "amdgpu.h"
-#include "sid.h"
-#include "ppsmc.h"
-#include "amdgpu_ucode.h"
-#include "sislands_smc.h"
-
-static int si_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
-
- return 0;
-}
-
-int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- unsigned long flags;
- int ret = 0;
- u32 data, original_data, addr, extra_shift;
-
- if (smc_start_address & 3)
- return -EINVAL;
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(SMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- original_data = RREG32(SMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* SMC address space is BE */
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = si_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(SMC_IND_DATA_0, data);
- }
-
-done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
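The removed routine above packs host bytes into big-endian dwords for the SMC and falls back to a read-modify-write for a sub-dword tail. A standalone sketch of just that packing logic, lifted from the deleted lines (the helper names are mine, not the driver's):

#include <stdint.h>

/* Pack four source bytes into one big-endian dword, as the SMC expects:
 * smc_pack_be32((const uint8_t[]){0x12, 0x34, 0x56, 0x78}) == 0x12345678. */
static uint32_t smc_pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}

/* Merge a 1..3 byte tail into an existing dword: the new bytes land in the
 * high-order lanes, the low 8 * (4 - count) bits keep their old value. */
static uint32_t smc_pack_tail(uint32_t original, const uint8_t *src,
			      uint32_t count)
{
	uint32_t shift = 8 * (4 - count);
	uint32_t data = 0;

	while (count--)
		data = (data << 8) + *src++;

	data <<= shift;
	return data | (original & ~(~0UL << shift));
}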
-
-void amdgpu_si_start_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
-
- tmp &= ~RST_REG;
-
- WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
-}
-
-void amdgpu_si_reset_smc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
-
- tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
- RST_REG;
- WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
-}
-
-int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
-{
- static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
-
- return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-}
-
-void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-
- if (enable)
- tmp &= ~CK_DISABLE;
- else
- tmp |= CK_DISABLE;
-
- WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
-{
- u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
- u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-
- if (!(rst & RST_REG) && !(clk & CK_DISABLE))
- return true;
-
- return false;
-}
-
-PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_si_is_smc_running(adev))
- return PPSMC_Result_Failed;
-
- WREG32(SMC_MESSAGE_0, msg);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
-
- return (PPSMC_Result)RREG32(SMC_RESP_0);
-}
-
-PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_si_is_smc_running(adev))
- return PPSMC_Result_OK;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & CKEN) == 0)
- break;
- udelay(1);
- }
-
- return PPSMC_Result_OK;
-}
-
-int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- unsigned long flags;
- u32 ucode_start_address;
- u32 ucode_size;
- const u8 *src;
- u32 data;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
-
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- src = (const u8 *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- if (ucode_size & 3)
- return -EINVAL;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
- while (ucode_size >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- WREG32(SMC_IND_DATA_0, data);
-
- src += 4;
- ucode_size -= 4;
- }
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = si_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = si_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
deleted file mode 100644
index d2930eceaf3c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef PP_SISLANDS_SMC_H
-#define PP_SISLANDS_SMC_H
-
-#include "ppsmc.h"
-
-#pragma pack(push, 1)
-
-#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-
-struct PP_SIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSSamplingTime;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint16_t PwrEfficiencyRatio;
- uint8_t Reserved[4];
-};
-
-typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
-
-struct PP_SIslands_DPM2Status
-{
- uint32_t dpm2Flags;
- uint8_t CurrPSkip;
- uint8_t CurrPSkipPowerShift;
- uint8_t CurrPSkipTDP;
- uint8_t CurrPSkipOCP;
- uint8_t MaxSPLLIndex;
- uint8_t MinSPLLIndex;
- uint8_t CurrSPLLIndex;
- uint8_t InfSweepMode;
- uint8_t InfSweepDir;
- uint8_t TDPexceeded;
- uint8_t reserved;
- uint8_t SwitchDownThreshold;
- uint32_t SwitchDownCounter;
- uint32_t SysScalingFactor;
-};
-
-typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
-
-struct PP_SIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
- uint32_t MinLimitDelta;
-};
-typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
-
-struct PP_SIslands_PAPMStatus
-{
- uint32_t EstimatedDGPU_T;
- uint32_t EstimatedDGPU_P;
- uint32_t EstimatedAPU_T;
- uint32_t EstimatedAPU_P;
- uint8_t dGPU_T_Limit_Exceeded;
- uint8_t reserved[3];
-};
-typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
-
-struct PP_SIslands_PAPMParameters
-{
- uint32_t NearTDPLimitTherm;
- uint32_t NearTDPLimitPAPM;
- uint32_t PlatformPowerLimit;
- uint32_t dGPU_T_Limit;
- uint32_t dGPU_T_Warning;
- uint32_t dGPU_T_Hysteresis;
-};
-typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
-
-struct SISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
-
-struct SISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
-
-struct SISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t phase_settings;
-};
-
-typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
-
-struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t UVDWatermark;
- uint8_t VCEWatermark;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint8_t padding;
- uint32_t aT;
- uint32_t bSP;
- SISLANDS_SMC_SCLK_VALUE sclk;
- SISLANDS_SMC_MCLK_VALUE mclk;
- SISLANDS_SMC_VOLTAGE_VALUE vddc;
- SISLANDS_SMC_VOLTAGE_VALUE mvdd;
- SISLANDS_SMC_VOLTAGE_VALUE vddci;
- SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint8_t hysteresisUp;
- uint8_t hysteresisDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t MaxPoweredUpCU;
- SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
- SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
- uint32_t reserved[2];
- PP_SIslands_Dpm2PerfLevel dpm2;
-};
-
-#define SISLANDS_SMC_STROBE_RATIO 0x0F
-#define SISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define SISLANDS_SMC_MC_STUTTER_EN 0x08
-#define SISLANDS_SMC_MC_PG_EN 0x10
-
-typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct SISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
-};
-
-typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
-
-#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
-#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
-
-struct SISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define SISLANDS_MAX_NO_VREG_STEPS 32
-
-struct SISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
-};
-
-typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
-
-#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
-#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
-#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
-#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
-#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
-#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
-#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
-#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
-#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
-#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
-#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
-#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
-#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
-#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
-#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
-#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
-#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
-#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
-#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
-#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
-
-struct PP_SIslands_FanTable
-{
- uint8_t fdo_mode;
- uint8_t padding;
- int16_t temp_min;
- int16_t temp_med;
- int16_t temp_max;
- int16_t slope1;
- int16_t slope2;
- int16_t fdo_min;
- int16_t hys_up;
- int16_t hys_down;
- int16_t hys_slope;
- int16_t temp_resp_lim;
- int16_t temp_curr;
- int16_t slope_curr;
- int16_t pwm_curr;
- uint32_t refresh_period;
- int16_t fdo_max;
- uint8_t temp_src;
- int8_t padding2;
-};
-
-typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
-
-#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
-#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
-
-#define SMC_SISLANDS_SCALE_I 7
-#define SMC_SISLANDS_SCALE_R 12
-
-struct PP_SIslands_CacConfig
-{
- uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
- uint32_t lkge_lut_V0;
- uint32_t lkge_lut_Vstep;
- uint32_t WinTime;
- uint32_t R_LL;
- uint32_t calculation_repeats;
- uint32_t l2numWin_TDP;
- uint32_t dc_cac;
- uint8_t lts_truncate_n;
- uint8_t SHIFT_N;
- uint8_t log2_PG_LKG_SCALE;
- uint8_t cac_temp;
- uint32_t lkge_lut_T0;
- uint32_t lkge_lut_Tstep;
-};
-
-typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
-
-#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
-#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-
-struct SMC_SIslands_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
-
-struct SMC_SIslands_MCRegisterSet
-{
- uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
-
-struct SMC_SIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
-
-struct SMC_SIslands_MCArbDramTimingRegisterSet
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint8_t mc_arb_rfsh_rate;
- uint8_t mc_arb_burst_time;
- uint8_t padding[2];
-};
-
-typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
-
-struct SMC_SIslands_MCArbDramTimingRegisters
-{
- uint8_t arb_current;
- uint8_t reserved[3];
- SMC_SIslands_MCArbDramTimingRegisterSet data[16];
-};
-
-typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
-
-struct SMC_SISLANDS_SPLL_DIV_TABLE
-{
- uint32_t freq[256];
- uint32_t ss[256];
-};
-
-#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
-#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
-#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
-#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
-#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
-
-typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
-
-#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
-
-#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
-
-struct Smc_SIslands_DTE_Configuration
-{
- uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
- uint32_t K;
- uint32_t T0;
- uint32_t MaxT;
- uint8_t WindowSize;
- uint8_t Tdep_count;
- uint8_t temp_select;
- uint8_t DTE_mode;
- uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
- uint32_t Tthreshold;
-};
-
-typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
-
-#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
-
-#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
-
-#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
-#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
-#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
-#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
-#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
-#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
-#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
-#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
-#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
-#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
-#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
-
-#pragma pack(pop)
-
-int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-void amdgpu_si_start_smc(struct amdgpu_device *adev);
-void amdgpu_si_reset_smc(struct amdgpu_device *adev);
-int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
-void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
-bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
-PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
-PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
-int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
-int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 *value, u32 limit);
-int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
- u32 value, u32 limit);
-
-#endif
-
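The firmware-header offsets above were consumed through the SRAM dword accessors deleted from si_smc.c. A hedged sketch of the lookup pattern they supported (the wrapper name here is illustrative; the offsets are exactly the ones defined in this header):

/* Sketch: fetch the SMC state-table location out of the firmware header. */
static int si_get_state_table_start(struct amdgpu_device *adev,
				    u32 *start, u32 limit)
{
	return amdgpu_si_read_smc_sram_dword(adev,
			SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
			SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
			start, limit);
}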
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index d55bf64770c4..7fb240c4990c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -508,14 +508,9 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
/* Send PPSMC_MSG_RequestI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
- goto Fail;
-
-
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, true))
+ if (!amdgpu_dpm_smu_i2c_bus_access(adev, true))
return true;
-Fail:
return false;
}
@@ -523,16 +518,10 @@ static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- /* Send PPSMC_MSG_RequestI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
- goto Fail;
-
/* Send PPSMC_MSG_ReleaseI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle,
- false))
+ if (!amdgpu_dpm_smu_i2c_bus_access(adev, false))
return true;
-Fail:
return false;
}
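Both hunks above collapse an open-coded "check the pp_funcs pointer, then call through it" sequence into a single amdgpu_dpm_smu_i2c_bus_access() call. Judging purely from the removed lines, the wrapper presumably centralizes that check, roughly (a sketch, not the helper's actual body):

/* Sketch of the wrapper implied by the removed call sites; 0 = success. */
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, bool acquire)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->smu_i2c_bus_access)
		return -EINVAL;

	return pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle,
					    acquire);
}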
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 84d811b6e48b..f57c5f57efa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -101,75 +101,40 @@
*/
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u32 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, v);
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u64 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* read low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
-
- /* read high 32 bit*/
- WREG32(address, reg + 4);
- (void)RREG32(address);
- r |= ((u64)RREG32(data) << 32);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* write low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, (u32)(v & 0xffffffffULL));
- (void)RREG32(data);
-
- /* write high 32 bit */
- WREG32(address, reg + 4);
- (void)RREG32(address);
- WREG32(data, (u32)(v >> 32));
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
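All four PCIE accessors above now delegate to common indirect-register helpers. Reconstructed from the deleted bodies, the shared 32-bit read presumably performs the same index/data sequence under pcie_idx_lock (a sketch; the in-tree helper may differ in detail):

/* Sketch of the generic helper, pieced together from the removed accessor. */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data, u32 reg_addr)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(pcie_index, reg_addr);
	(void)RREG32(pcie_index);	/* read back to post the index write */
	r = RREG32(pcie_data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}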
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
@@ -484,13 +449,13 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
- pci_save_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
ret = psp_gpu_reset(adev);
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");
- pci_restore_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -580,10 +545,13 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
+ dev_info(adev->dev, "BACO reset\n");
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
default:
+ dev_info(adev->dev, "MODE1 reset\n");
return soc15_asic_mode1_reset(adev);
}
}
@@ -694,12 +662,12 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
* it doesn't support SRIOV. */
if (amdgpu_discovery) {
r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- DRM_WARN("failed to init reg base from ip discovery table, "
- "fallback to legacy init method\n");
- vega10_reg_base_init(adev);
- }
+ if (r == 0)
+ break;
+ DRM_WARN("failed to init reg base from ip discovery table, "
+ "fallback to legacy init method\n");
}
+ vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
@@ -1026,6 +994,11 @@ static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
return (nak_r + nak_g);
}
+static void soc15_pre_asic_init(struct amdgpu_device *adev)
+{
+ gmc_v9_0_restore_registers(adev);
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -1046,6 +1019,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
+ .pre_asic_init = &soc15_pre_asic_init,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1069,6 +1043,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
+ .pre_asic_init = &soc15_pre_asic_init,
};
static int soc15_common_early_init(void *handle)
@@ -1220,8 +1195,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1268,7 +1242,15 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- adev->apu_flags |= AMD_APU_IS_RENOIR;
+ if (adev->pdev->device == 0x1636)
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ adev->external_rev_id = adev->rev_id + 0x91;
+ else
+ adev->external_rev_id = adev->rev_id + 0xa1;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1293,7 +1275,6 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
- adev->external_rev_id = adev->rev_id + 0x91;
break;
default:
/* FIXME: not supported yet */
@@ -1449,7 +1430,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
uint32_t def, data;
if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS) {
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_RENOIR) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h
new file mode 100644
index 000000000000..f14833fae07c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_RAP_IF_H
+#define _TA_RAP_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_rap_status {
+ TA_RAP_STATUS__SUCCESS = 1,
+ TA_RAP_STATUS__ERROR_GENERIC_FAILURE = 2,
+ TA_RAP_STATUS__ERROR_CMD_NOT_SUPPORTED = 3,
+ TA_RAP_STATUS__ERROR_INVALID_VALIDATION_METHOD = 4,
+ TA_RAP_STATUS__ERROR_NULL_POINTER = 5,
+ TA_RAP_STATUS__ERROR_NOT_INITIALIZED = 6,
+ TA_RAP_STATUS__ERROR_VALIDATION_FAILED = 7,
+ TA_RAP_STATUS__ERROR_ASIC_NOT_SUPPORTED = 8,
+ TA_RAP_STATUS__ERROR_OPERATION_NOT_PERMISSABLE = 9,
+ TA_RAP_STATUS__ERROR_ALREADY_INIT = 10,
+};
+
+enum ta_rap_cmd {
+ TA_CMD_RAP__INITIALIZE = 1,
+ TA_CMD_RAP__VALIDATE_L0 = 2,
+};
+
+enum ta_rap_validation_method {
+ METHOD_A = 1,
+};
+
+struct ta_rap_cmd_input_data {
+ uint8_t reserved[8];
+};
+
+struct ta_rap_cmd_output_data {
+ uint32_t last_subsection;
+ uint32_t num_total_validate;
+ uint32_t num_valid;
+ uint32_t last_validate_addr;
+ uint32_t last_validate_val;
+ uint32_t last_validate_val_exptd;
+};
+
+union ta_rap_cmd_input {
+ struct ta_rap_cmd_input_data input;
+};
+
+union ta_rap_cmd_output {
+ struct ta_rap_cmd_output_data output;
+};
+
+struct ta_rap_shared_memory {
+ uint32_t cmd_id;
+ uint32_t validation_method_id;
+ uint32_t resp_id;
+ enum ta_rap_status rap_status;
+ union ta_rap_cmd_input rap_in_message;
+ union ta_rap_cmd_output rap_out_message;
+ uint8_t reserved[64];
+};
+
+#endif // _TA_RAP_IF_H
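Responses in this interface reuse the command id with bit 31 set, so masking the bit back off recovers the originating command. A worked example using the macros above:

uint32_t resp_id = RSP_ID(TA_CMD_RAP__VALIDATE_L0);	/* 2 | (1U << 31) = 0x80000002 */
uint32_t cmd_id  = resp_id & ~RSP_ID_MASK;		/* back to 0x00000002 */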
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 418cf097c918..5288617ca552 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -32,20 +32,6 @@
#define UMC_6_INST_DIST 0x40000
-/*
- * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
- * is the index of 8KB block
- */
-#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
-/* channel index is the index of 256B block */
-#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
-/* offset in 256B block */
-#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
-
-#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
-#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
-#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
-
const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
{2, 18, 11, 27}, {4, 20, 13, 29},
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
new file mode 100644
index 000000000000..5665c77a9d58
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+
+#include "rsmu/rsmu_0_0_2_offset.h"
+#include "rsmu/rsmu_0_0_2_sh_mask.h"
+#include "umc/umc_8_7_0_offset.h"
+#include "umc/umc_8_7_0_sh_mask.h"
+
+#define UMC_8_INST_DIST 0x40000
+
+const uint32_t
+ umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM] = {
+ {2, 11}, {4, 13},
+ {1, 8}, {7, 14},
+ {10, 3}, {12, 5},
+ {9, 0}, {15, 6}
+};
+
+static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_8_INST_DIST * umc_inst;
+}
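channel_offs is filled in per ASIC; with the Sienna Cichlid stride from umc_v8_7.h below (UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA, 0x400), the helper walks 0x400 registers per channel and 0x40000 per UMC instance. A worked example under that assumption:

/*
 * get_umc_8_reg_offset(adev, umc_inst = 1, ch_inst = 1)
 *   = 0x400 * 1 + 0x40000 * 1 = 0x40400
 * a dword-register offset, hence the "* 4" at every RREG32_PCIE call site.
 */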
+
+static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_clear_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_clear_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+}
+
+static void umc_v8_7_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ /* UMC 8_7_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* check for SRAM correctable error;
+  * MCUMC_STATUS is a 64-bit register
+  */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v8_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_7_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ umc_v8_7_clear_error_count(adev);
+}
+
+static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint32_t lsb, mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ /* the lowest 'lsb' bits of the address are invalid and masked off */
+ lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ err_addr &= ~((0x1ULL << lsb) - 1);
+
+ /* translate the umc channel address to a soc physical address by combining three parts */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
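The retired_page computation ORs three disjoint address parts together via the 8KB/256B block macros (visible above being dropped from umc_v6_1.c as they move to a shared header). A worked example, assuming err_addr = 0x12345 and channel_index = 3:

/*
 *   ADDR_OF_8KB_BLOCK(0x12345)    = (0x12300 << 5) = 0x246000
 *   ADDR_OF_256B_BLOCK(3)         = (3 << 8)       = 0x000300
 *   OFFSET_IN_256B_BLOCK(0x12345)                  = 0x000045
 * retired_page = 0x246345; shifting by AMDGPU_GPU_PAGE_SHIFT then yields
 * the page frame number stored in the eeprom record.
 */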
+
+static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
+static void umc_v8_7_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_err_cnt_init_per_channel(adev, umc_reg_offset);
+ }
+}
+
+const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+ .err_cnt_init = umc_v8_7_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
+ .query_ras_error_count = umc_v8_7_query_ras_error_count,
+ .query_ras_error_address = umc_v8_7_query_ras_error_address,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
new file mode 100644
index 000000000000..d4d0468e3df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_7_H__
+#define __UMC_V8_7_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* HBM Memory Channel Width */
+#define UMC_V8_7_HBM_MEMORY_CHANNEL_WIDTH 128
+/* number of umc channel instance with memory map register access */
+#define UMC_V8_7_CHANNEL_INSTANCE_NUM 2
+/* number of umc instance with memory map register access */
+#define UMC_V8_7_UMC_INSTANCE_NUM 8
+/* total channel instances in one umc block */
+#define UMC_V8_7_TOTAL_CHANNEL_NUM (UMC_V8_7_CHANNEL_INSTANCE_NUM * UMC_V8_7_UMC_INSTANCE_NUM)
+/* UMC register per channel offset */
+#define UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA 0x400
+
+/* EccErrCnt max value */
+#define UMC_V8_7_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_7_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
+
+extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const uint32_t
+ umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
+
+#endif
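One detail worth spelling out: with the max count and the interrupt threshold both 0xffff, the seed value works out to zero, so the "- UMC_V8_7_CE_CNT_INIT" adjustments in the query path are arithmetic no-ops, presumably kept for symmetry with UMC versions that seed a non-zero count:

/*
 * UMC_V8_7_CE_CNT_INIT = UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD
 *                      = 0xffff - 0xffff
 *                      = 0
 */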
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 3cafba726587..b0c0c438fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -348,7 +348,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -541,7 +541,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a566ff926e90..6e57001f6d0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -253,7 +253,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
uint64_t offset;
uint32_t size;
- /* programm memory controller bits 0-27 */
+ /* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -404,7 +404,7 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0a880bc101b8..666bfa4a0b8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -583,7 +583,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
uint64_t offset;
uint32_t size;
- /* programm memory controller bits 0-27 */
+ /* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -825,7 +825,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1240,8 +1240,8 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
break;
}
- if (false == int_handled)
- DRM_ERROR("Unhandled interrupt: %d %d\n",
+ if (!int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e07e3fae99b5..b44c8677ce8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1073,7 +1073,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 927c330fad21..86e1ef732ebe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
* vcn_v1_0_early_init - set function pointers
@@ -910,7 +911,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1068,7 +1069,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1804,11 +1805,24 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
}
}
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+ if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
+ DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
+
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
+{
+ struct amdgpu_device *adev = ring->adev;
+
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
}
}
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
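The vcn_v1_0.c change above serializes the VCN and JPEG decode rings: begin_use now takes the shared vcn1_jpeg1_workaround mutex and drains the other block's fences before powering up, and the new vcn_v1_0_ring_end_use() re-arms the idle work and releases the mutex. A minimal user-space sketch of that begin_use/end_use pairing, with pthreads and stub helpers standing in for the amdgpu primitives:

#include <pthread.h>
#include <stdio.h>

struct ring;	/* opaque stand-in for struct amdgpu_ring */

static pthread_mutex_t vcn1_jpeg1_workaround = PTHREAD_MUTEX_INITIALIZER;

/* stub: returns 0 once the other ring's fences have drained */
static int fence_wait_empty(struct ring *other) { (void)other; return 0; }
static void power_up(struct ring *r) { (void)r; /* clock/power gating elided */ }
static void schedule_idle_work(void) { /* delayed power-down elided */ }

static void ring_begin_use(struct ring *r, struct ring *other)
{
	pthread_mutex_lock(&vcn1_jpeg1_workaround);

	if (fence_wait_empty(other))
		fprintf(stderr, "other ring may not be empty\n");

	power_up(r);
	/* the mutex is intentionally held until ring_end_use() */
}

static void ring_end_use(struct ring *r)
{
	(void)r;
	schedule_idle_work();
	pthread_mutex_unlock(&vcn1_jpeg1_workaround);
}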
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index f67d7391fc21..1f1cc7f0ece7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,7 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 23a9eb5b2c8a..e5d29dee0c88 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -900,7 +900,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1060,7 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index e99bef6e2354..0f1d3ef8baa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -80,23 +80,18 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
+ u32 harvest;
+ int i;
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
adev->vcn.num_enc_rings = 2;
}
@@ -887,7 +882,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1067,7 +1062,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1108,7 +1103,7 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
{
uint32_t data = 0, loop = 0, size = 0;
uint64_t addr = table->gpu_addr;
- struct mmsch_v1_1_init_header *header = NULL;;
+ struct mmsch_v1_1_init_header *header = NULL;
header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
size = header->total_size;
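The vcn_v2_5_early_init() rewrite above makes the harvest-fuse walk unconditional: each instance's fuse bit is folded into harvest_config, and the block is disabled with -ENOENT only when every instance is fused off. A minimal sketch of that logic, with read_harvest_fuse() as a hypothetical stand-in for the RREG32_SOC15(mmCC_UVD_HARVESTING) read:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_INSTANCES 2	/* cf. VCN25_MAX_HW_INSTANCES_ARCTURUS */
#define ALL_HARVESTED ((1u << MAX_INSTANCES) - 1)

static bool read_harvest_fuse(int inst) { (void)inst; return false; /* stub */ }

static int early_init(uint32_t *harvest_config)
{
	int i;

	*harvest_config = 0;
	for (i = 0; i < MAX_INSTANCES; i++)
		if (read_harvest_fuse(i))
			*harvest_config |= 1u << i;

	/* all instances harvested: disable the whole IP block */
	if (*harvest_config == ALL_HARVESTED)
		return -ENOENT;

	return 0;
}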
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 63e5547cfb16..e074f7ed388c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -198,7 +198,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
- if (i != 0)
+ if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
ring->no_scheduler = true;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -222,7 +222,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
- if (i != 1)
+ if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
ring->no_scheduler = true;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -746,18 +746,18 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
| UVD_SUVD_CGC_GATE__EFC_MASK
| UVD_SUVD_CGC_GATE__SAOE_MASK
- | 0x08000000
+ | UVD_SUVD_CGC_GATE__SRE_AV1_MASK
| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
- | 0x40000000
+ | UVD_SUVD_CGC_GATE__SCM_AV1_MASK
| UVD_SUVD_CGC_GATE__SMPA_MASK);
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);
data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
| UVD_SUVD_CGC_GATE2__MPBE1_MASK
- | 0x00000004
- | 0x00000008
+ | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
+ | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
| UVD_SUVD_CGC_GATE2__MPC1_MASK);
WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);
@@ -776,8 +776,8 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
- | 0x00008000
- | 0x00010000
+ | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
@@ -892,8 +892,8 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
- | 0x00008000
- | 0x00010000
+ | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
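The three vcn_v3_0.c hunks above replace raw hex constants with the named AV1 gate/mode macros; the bit values are unchanged, only the spelling is. A minimal sketch of the before/after, using the values the hunks themselves remove (0x08000000 and 0x40000000):

#include <stdint.h>

/* assumed values, taken from the magic numbers the hunks replace */
#define CGC_GATE__SRE_AV1_MASK 0x08000000u
#define CGC_GATE__SCM_AV1_MASK 0x40000000u

static uint32_t build_gate_mask(uint32_t data)
{
	/* same bits as "data |= 0x08000000 | 0x40000000", but self-documenting */
	return data | CGC_GATE__SRE_AV1_MASK | CGC_GATE__SCM_AV1_MASK;
}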
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f6f2ed0830b1..9bcd0eebc6d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -752,8 +752,10 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
+ dev_info(adev->dev, "PCI CONFIG reset\n");
r = vi_asic_pci_config_reset(adev);
}
@@ -1066,6 +1068,10 @@ static bool vi_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static void vi_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
@@ -1086,6 +1092,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.need_reset_on_init = &vi_need_reset_on_init,
.get_pcie_replay_count = &vi_get_pcie_replay_count,
.supports_baco = &vi_asic_supports_baco,
+ .pre_asic_init = &vi_pre_asic_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1507,8 +1514,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_MC,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1526,8 +1532,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_SDMA,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
@@ -1545,8 +1550,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_HDP,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
@@ -1560,8 +1564,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
if (state == AMD_CG_STATE_UNGATE)
@@ -1573,8 +1576,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
@@ -1588,8 +1590,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
@@ -1603,8 +1604,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
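The repeated vi.c hunks above all drop the same call-site guard, "if (adev->powerplay.pp_funcs->set_clockgating_by_smu)", leaving the amdgpu_dpm_set_clockgating_by_smu() calls unconditional. That is safe either because the callback is always present on these parts or because the check lives in the wrapper; the sketch below shows the latter shape as an assumption, with illustrative names rather than the driver's:

struct pp_funcs {
	int (*set_clockgating_by_smu)(void *handle, unsigned int msg_id);
};

struct dev_ctx {
	const struct pp_funcs *pp_funcs;
};

/* the wrapper owns the NULL checks, so every call site stays unconditional */
static int dpm_set_clockgating_by_smu(struct dev_ctx *dev, unsigned int msg_id)
{
	if (!dev->pp_funcs || !dev->pp_funcs->set_clockgating_by_smu)
		return -1;	/* not supported on this device */

	return dev->pp_funcs->set_clockgating_by_smu(dev, msg_id);
}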