Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c  407
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.h  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  196
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c  73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  184
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  555
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  193
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  101
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  143
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  136
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  146
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  423
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h  105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c  98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h  85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  498
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c  43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v2_1.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c  4
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c  0
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  151
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  224
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c  1297
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c  75
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c  54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c  67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v10_1.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c  1333
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c  64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  251
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c  378
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.h  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c  185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c  232
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c  121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  376
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h  63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c  281
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.h  37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  103
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c  167
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c  55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  190
166 files changed, 9345 insertions, 2003 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 9375e7f12420..74a8105fd2c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -34,15 +34,6 @@ config DRM_AMDGPU_USERPTR
This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
isn't already selected to enabled full userptr support.
-config DRM_AMDGPU_GART_DEBUGFS
- bool "Allow GART access through debugfs"
- depends on DRM_AMDGPU
- depends on DEBUG_FS
- default n
- help
- Selecting this option creates a debugfs file to inspect the mapped
- pages. Uses more memory for housekeeping, enable only for debugging.
-
source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/amd/display/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 13ebb1f71e49..ee85e8aba636 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
# add DF block
amdgpu-y += \
@@ -83,11 +83,12 @@ amdgpu-y += \
gmc_v7_0.o \
gmc_v8_0.o \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
- gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o
+ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
+ mmhub_v1_7.o
# add UMC block
amdgpu-y += \
- umc_v6_1.o umc_v6_0.o umc_v8_7.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o
# add IH block
amdgpu-y += \
@@ -106,7 +107,8 @@ amdgpu-y += \
psp_v3_1.o \
psp_v10_0.o \
psp_v11_0.o \
- psp_v12_0.o
+ psp_v12_0.o \
+ psp_v13_0.o
# add DCE block
amdgpu-y += \
@@ -121,6 +123,7 @@ amdgpu-y += \
gfx_v8_0.o \
gfx_v9_0.o \
gfx_v9_4.o \
+ gfx_v9_4_2.o \
gfx_v10_0.o
# add async DMA block
@@ -129,6 +132,7 @@ amdgpu-y += \
sdma_v2_4.o \
sdma_v3_0.o \
sdma_v4_0.o \
+ sdma_v4_4.o \
sdma_v5_0.o \
sdma_v5_2.o
@@ -172,11 +176,17 @@ amdgpu-y += \
amdgpu-y += \
smuio_v9_0.o \
smuio_v11_0.o \
- smuio_v11_0_6.o
+ smuio_v11_0_6.o \
+ smuio_v13_0.o
+
+# add reset block
+amdgpu-y += \
+ amdgpu_reset.o
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
include $(FULL_AMD_PATH)/amdkfd/Makefile
@@ -187,6 +197,7 @@ amdgpu-y += \
amdgpu_amdkfd_gfx_v8.o \
amdgpu_amdkfd_gfx_v9.o \
amdgpu_amdkfd_arcturus.o \
+ amdgpu_amdkfd_aldebaran.o \
amdgpu_amdkfd_gfx_v10.o \
amdgpu_amdkfd_gfx_v10_3.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
new file mode 100644
index 000000000000..65b1dca4b02e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "aldebaran.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ dev_dbg(adev->dev, "Getting reset handler for method %d\n",
+ reset_context->method);
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ }
+
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ return handler;
+ }
+ }
+ }
+
+ dev_dbg(adev->dev, "Reset handler not found!\n");
+
+ return NULL;
+}
+
+static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_dbg(adev->dev, "Aldebaran prepare hw context\n");
+ /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+ if (!amdgpu_sriov_vf(adev))
+ r = aldebaran_mode2_suspend_ip(adev);
+
+ return r;
+}
+
+static void aldebaran_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int aldebaran_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+ return adev->asic_reset_res;
+}
+
+static int
+aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r = 0;
+
+ dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+ if (reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
+ }
+ /*
+ * Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
+ * them together so that they can be completed asynchronously on multiple nodes
+ */
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->reset_cntl->reset_work))
+ r = -EALREADY;
+ } else
+ r = aldebaran_mode2_reset(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev,
+ &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->reset_cntl->reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_ip_block *cmn_block;
+ int ucode_count = 0;
+ int i, r;
+
+ dev_dbg(adev->dev, "Reloading ucodes after reset\n");
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+ if (!ucode->fw)
+ continue;
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ case AMDGPU_UCODE_ID_SDMA1:
+ case AMDGPU_UCODE_ID_SDMA2:
+ case AMDGPU_UCODE_ID_SDMA3:
+ case AMDGPU_UCODE_ID_SDMA4:
+ case AMDGPU_UCODE_ID_SDMA5:
+ case AMDGPU_UCODE_ID_SDMA6:
+ case AMDGPU_UCODE_ID_SDMA7:
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ case AMDGPU_UCODE_ID_RLC_G:
+ ucode_list[ucode_count++] = ucode;
+ break;
+ default:
+ break;
+ };
+ }
+
+ /* Reinit NBIF block */
+ cmn_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON);
+ if (unlikely(!cmn_block)) {
+ dev_err(adev->dev, "Failed to get BIF handle\n");
+ return -EINVAL;
+ }
+ r = cmn_block->version->funcs->resume(adev);
+ if (r)
+ return r;
+
+ /* Reinit GFXHUB */
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ /* Reload GFX firmware */
+ r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count);
+ if (r) {
+ dev_err(adev->dev, "GFX ucode load failed after reset\n");
+ return r;
+ }
+
+ /* Resume RLC, FW needs RLC alive to complete reset process */
+ adev->gfx.rlc.funcs->resume(adev);
+
+ /* Wait for FW reset event complete */
+ r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to get response from firmware after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_COMMON))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = aldebaran_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Add this ASIC as tracked as reset was already
+ * complete successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(reset_context->hive,
+ tmp_adev);
+
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ tmp_adev->asic_reset_res = r;
+ goto end;
+ }
+ }
+ }
+
+end:
+ return r;
+}
+
+static struct amdgpu_reset_handler aldebaran_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = aldebaran_mode2_prepare_hwcontext,
+ .perform_reset = aldebaran_mode2_perform_reset,
+ .restore_hwcontext = aldebaran_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = aldebaran_mode2_reset,
+};
+
+int aldebaran_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = aldebaran_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = aldebaran_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int aldebaran_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
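
The reset control added above always runs the same three-phase sequence once a handler is found: prepare_hwcontext, then perform_reset, then restore_hwcontext, with handler selection falling back to MODE2 on CPU-connected (A+A) parts. The following stand-alone sketch models that selection and sequencing with simplified types; the struct layout and names are illustrative stand-ins, not the definitions from amdgpu_reset.h.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's reset-control types; the real
 * definitions live in amdgpu_reset.h and differ in detail. */
enum reset_method { RESET_NONE = -1, RESET_MODE1 = 1, RESET_MODE2 = 2 };

struct reset_handler {
	enum reset_method method;
	int (*prepare)(void);
	int (*perform)(void);
	int (*restore)(void);
};

static int ok(void) { return 0; }

/* Exact-method match first, then fall back to MODE2 for A+A
 * (CPU-connected) parts, the same policy as aldebaran_get_reset_handler(). */
static const struct reset_handler *
get_handler(const struct reset_handler *h, int n, enum reset_method wanted,
	    int connected_to_cpu)
{
	int i;

	for (i = 0; i < n; i++)
		if (wanted != RESET_NONE && h[i].method == wanted)
			return &h[i];
	if (connected_to_cpu)
		for (i = 0; i < n; i++)
			if (h[i].method == RESET_MODE2)
				return &h[i];
	return NULL;
}

int main(void)
{
	const struct reset_handler handlers[] = {
		{ RESET_MODE2, ok, ok, ok },	/* only mode2 is registered today */
	};
	const struct reset_handler *h =
		get_handler(handlers, 1, RESET_NONE, 1 /* connected_to_cpu */);

	if (!h)
		return 1;
	/* Three-phase sequence driven by the reset control. */
	if (h->prepare() || h->perform() || h->restore())
		return 1;
	printf("mode2 reset sequence completed\n");
	return 0;
}
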
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
new file mode 100644
index 000000000000..a07db5454d49
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ALDEBARAN_H__
+#define __ALDEBARAN_H__
+
+#include "amdgpu.h"
+
+int aldebaran_reset_init(struct amdgpu_device *adev);
+int aldebaran_reset_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
new file mode 100644
index 000000000000..28e6c9ab8767
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "aldebaran_ip_offset.h"
+
+int aldebaran_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialized the block needed by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i]));
+ adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i]));
+ adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ }
+ return 0;
+}
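
These per-instance base tables are what the SOC15 register helpers index at runtime: an absolute register offset is the IP instance's segment base plus the register's offset relative to that segment (the SOC15_REG_OFFSET() macro in soc15_common.h performs this addition against adev->reg_offset). Below is a minimal stand-alone model of the lookup; the base values are made up for illustration, the real ones come from aldebaran_ip_offset.h.

#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCE 2
#define MAX_SEGMENT  4

/* Simplified stand-in for one HWIP's base-address table (illustrative values). */
struct ip_base_instance {
	uint32_t segment[MAX_SEGMENT];
};

struct ip_base {
	struct ip_base_instance instance[MAX_INSTANCE];
};

static const struct ip_base GC_BASE = {
	.instance = {
		{ .segment = { 0x00001260, 0x0000A000, 0x02402C00, 0 } },
		{ .segment = { 0, 0, 0, 0 } },
	},
};

/* Model of SOC15-style addressing: absolute dword offset equals the
 * per-instance segment base plus the register offset within that segment. */
static uint32_t reg_offset(const struct ip_base *base, unsigned int inst,
			   unsigned int seg, uint32_t rel_offset)
{
	return base->instance[inst].segment[seg] + rel_offset;
}

int main(void)
{
	/* Hypothetical register at relative offset 0x40 in GC segment 0. */
	printf("absolute offset: 0x%08x\n",
	       (unsigned int)reg_offset(&GC_BASE, 0, 0, 0x40));
	return 0;
}
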
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 29885febc0b0..dc3a69296321 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -107,7 +107,6 @@
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
-#include "amdgpu_hdp.h"
#define MAX_GPU_INSTANCE 16
@@ -124,6 +123,16 @@ struct amdgpu_mgpu_info
uint32_t num_gpu;
uint32_t num_dgpu;
uint32_t num_apu;
+
+ /* delayed reset_func for XGMI configuration if necessary */
+ struct delayed_work delayed_reset_work;
+ bool pending_reset;
+};
+
+struct amdgpu_watchdog_timer
+{
+ bool timeout_fatal_disable;
+ uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};
#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
@@ -177,7 +186,9 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
@@ -185,6 +196,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
+extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
@@ -258,6 +270,8 @@ struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
+struct amdgpu_reset_context;
+struct amdgpu_reset_control;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -576,6 +590,7 @@ struct amdgpu_allowed_register_entry {
};
enum amd_reset_method {
+ AMD_RESET_METHOD_NONE = -1,
AMD_RESET_METHOD_LEGACY = 0,
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
@@ -584,6 +599,19 @@ enum amd_reset_method {
AMD_RESET_METHOD_PCI,
};
+struct amdgpu_video_codec_info {
+ u32 codec_type;
+ u32 max_width;
+ u32 max_height;
+ u32 max_pixels_per_frame;
+ u32 max_level;
+};
+
+struct amdgpu_video_codecs {
+ const u32 codec_count;
+ const struct amdgpu_video_codec_info *codec_array;
+};
+
/*
* ASIC specific functions.
*/
@@ -628,6 +656,9 @@ struct amdgpu_asic_funcs {
void (*pre_asic_init)(struct amdgpu_device *adev);
/* enter/exit umd stable pstate */
int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
+ /* query video codecs */
+ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs);
};
/*
@@ -792,12 +823,7 @@ struct amdgpu_device {
bool accel_working;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_preempt;
- struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-#endif
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
@@ -853,8 +879,6 @@ struct amdgpu_device {
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
amdgpu_block_wreg_t audio_endpt_wreg;
- void __iomem *rio_mem;
- resource_size_t rio_mem_size;
struct amdgpu_doorbell doorbell;
/* clock/pll info */
@@ -897,6 +921,8 @@ struct amdgpu_device {
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
+ struct amdgpu_irq_src dmub_trace_irq;
+ struct amdgpu_irq_src dmub_outbox_irq;
/* rings */
u64 fence_context;
@@ -1020,6 +1046,7 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
+ struct list_head reset_list;
long gfx_timeout;
long sdma_timeout;
@@ -1050,6 +1077,8 @@ struct amdgpu_device {
bool in_pci_err_recovery;
struct pci_saved_state *pci_state;
+
+ struct amdgpu_reset_control *reset_cntl;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1062,7 +1091,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
return &adev->ddev;
}
-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
@@ -1084,9 +1113,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr);
@@ -1103,6 +1129,12 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context);
+
int emu_soc_asic_init(struct amdgpu_device *adev);
/*
@@ -1168,8 +1200,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
-#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
-#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1223,6 +1253,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
+#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
@@ -1242,7 +1273,9 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
+bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1356,6 +1389,13 @@ void amdgpu_pci_resume(struct pci_dev *pdev);
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state);
+
#include "amdgpu_object.h"
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
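
The new query_video_codecs() ASIC callback and amdgpu_video_codecs structures give each chip a const table of supported formats that callers look up by encode/decode direction. A self-contained sketch of that shape follows; the codec_type values and size limits are placeholders, not real hardware capabilities.

#include <stdio.h>
#include <stdbool.h>

/* Shapes mirror struct amdgpu_video_codec_info / amdgpu_video_codecs. */
struct video_codec_info {
	unsigned int codec_type;
	unsigned int max_width;
	unsigned int max_height;
	unsigned int max_pixels_per_frame;
	unsigned int max_level;
};

struct video_codecs {
	unsigned int codec_count;
	const struct video_codec_info *codec_array;
};

/* Placeholder decode table; the numbers are illustrative, not real caps. */
static const struct video_codec_info example_decode_array[] = {
	{ .codec_type = 1, .max_width = 4096, .max_height = 4096,
	  .max_pixels_per_frame = 4096 * 4096, .max_level = 52 },
};

static const struct video_codecs example_decode = {
	.codec_count = 1,
	.codec_array = example_decode_array,
};

/* Same contract as the new asic_funcs->query_video_codecs() hook:
 * pick a table based on encode vs. decode and return it by pointer. */
static int query_video_codecs(bool encode, const struct video_codecs **codecs)
{
	if (encode)
		return -1;	/* this sketch only models the decode table */
	*codecs = &example_decode;
	return 0;
}

int main(void)
{
	const struct video_codecs *c;

	if (!query_video_codecs(false, &c))
		printf("decode codecs: %u, first max %ux%u\n",
		       c->codec_count, c->codec_array[0].max_width,
		       c->codec_array[0].max_height);
	return 0;
}
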
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c5343a5eecbe..5f6696a3c778 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -44,7 +44,7 @@ int amdgpu_amdkfd_init(void)
int ret;
si_meminfo(&si);
- amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
ret = kgd2kfd_init();
@@ -165,7 +165,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
+ adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
+ adev_to_drm(adev), &gpu_resources);
}
}
@@ -245,6 +246,7 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (cp_mqd_gfx9)
bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
@@ -316,6 +318,7 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
int r;
@@ -326,14 +329,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- r = amdgpu_bo_create(adev, &bp, &bo);
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
if (r) {
dev_err(adev->dev,
"failed to allocate gws BO for amdkfd (%d)\n", r);
return r;
}
+ bo = &ubo->bo;
*mem_obj = bo;
return 0;
}
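
The switch to amdgpu_bo_create_user() follows the embedded-object pattern: the allocation is sized for a larger containing structure (the new bp.bo_ptr_size field appears to carry that size), the base BO sits as the container's first member, and callers that only need the base simply take its address, as the bo = &ubo->bo line above does. A stand-alone sketch of the pattern with made-up type names:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Base object, embedded first so a pointer to it can be converted back
 * to the containing structure. */
struct base_bo {
	size_t size;
};

struct user_bo {
	struct base_bo bo;	/* must stay the first member */
	int metadata;		/* extra state only user BOs carry */
};

/* Allocate bo_ptr_size bytes but initialise only the embedded base,
 * mirroring the shape of the core create path after this change. */
static struct base_bo *bo_create(size_t bo_ptr_size, size_t size)
{
	struct base_bo *bo = calloc(1, bo_ptr_size);

	if (bo)
		bo->size = size;
	return bo;
}

/* Wrapper in the spirit of amdgpu_bo_create_user(): asks for the larger
 * structure and hands back the container. */
static struct user_bo *bo_create_user(size_t size)
{
	return (struct user_bo *)bo_create(sizeof(struct user_bo), size);
}

int main(void)
{
	struct user_bo *ubo = bo_create_user(4096);
	struct base_bo *bo = &ubo->bo;	/* callers that only need the base */

	printf("bo size %zu, metadata %d\n", bo->size, ubo->metadata);
	free(ubo);
	return 0;
}
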
@@ -494,8 +499,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
*dma_buf_kgd = (struct kgd_dev *)adev;
if (bo_size)
*bo_size = amdgpu_bo_size(bo);
- if (metadata_size)
- *metadata_size = bo->metadata_size;
if (metadata_buffer)
r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
metadata_size, &metadata_flags);
@@ -638,13 +641,6 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- /* Temp workaround to fix the soft hang observed in certain compute
- * applications if GFXOFF is enabled.
- */
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
- amdgpu_gfx_off_ctrl(adev, idle);
- }
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
!idle);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a81d9cacf9b8..14f68c028126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -80,6 +80,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ bool init_complete;
};
enum kgd_engine_type {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
new file mode 100644
index 000000000000..a5434b713856
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+
+const struct kfd2kgd_calls aldebaran_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
+ .address_watch_disable = kgd_gfx_v9_address_watch_disable,
+ .address_watch_execute = kgd_gfx_v9_address_watch_execute,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 604757a1e440..9ef9f3ddad48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -122,7 +122,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
return sdma_rlc_reg_offset;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -192,7 +192,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
@@ -224,7 +224,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
@@ -243,7 +243,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -289,13 +289,13 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
.address_watch_disable = kgd_gfx_v9_address_watch_disable,
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
new file mode 100644
index 000000000000..ce08131b7b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 3107b9575929..5af464933976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
* All the BOs in a process share an eviction fence. When process X wants
* to map VRAM memory but TTM can't find enough space, TTM will attempt to
* evict BOs from its LRU list. TTM checks if the BO is valuable to evict
- * by calling ttm_bo_driver->eviction_valuable().
+ * by calling ttm_device_funcs->eviction_valuable().
*
- * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+ * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
* to process X. Otherwise, it will return true to indicate BO can be
* evicted by TTM.
*
- * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+ * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
* the evcition process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
* --> amdgpu_copy_buffer(). This sets up job in GPU scheduler.
*
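
Following the contract in the comment above, a BO is not a worthwhile eviction candidate for the process that owns its eviction fence, while any other process's request may proceed. A stand-alone sketch of that ownership test, with deliberately simplified types (the real code inspects the BO's reservation object), could look like this:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins: one eviction fence shared by all BOs of a process,
 * identified here by an opaque owner pointer (the process's mm in the kernel). */
struct eviction_fence {
	const void *owner;
};

struct buffer_object {
	const struct eviction_fence *ef;	/* NULL for non-KFD BOs */
};

/* Mirrors the contract described above: a BO is not a valuable eviction
 * candidate for the very process that owns it. */
static bool eviction_valuable(const struct buffer_object *bo,
			      const void *current_owner)
{
	if (bo->ef && bo->ef->owner == current_owner)
		return false;	/* belongs to process X: skip it */
	return true;		/* anyone else's BO may be evicted */
}

int main(void)
{
	int proc_x, proc_y;
	struct eviction_fence ef_x = { .owner = &proc_x };
	struct buffer_object bo = { .ef = &ef_x };

	printf("evict for X? %d, evict for Y? %d\n",
	       eviction_valuable(&bo, &proc_x), eviction_valuable(&bo, &proc_y));
	return 0;
}
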
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ac0a432a9bf7..e93850f2f3b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -31,6 +31,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_xgmi.h"
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -96,7 +97,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
uint64_t mem;
si_meminfo(&si);
- mem = si.totalram - si.totalhigh;
+ mem = si.freeram - si.freehigh;
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
@@ -119,6 +120,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
*/
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
+static size_t amdgpu_amdkfd_acc_size(uint64_t size)
+{
+ size >>= PAGE_SHIFT;
+ size *= sizeof(dma_addr_t) + sizeof(void *);
+
+ return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
+ __roundup_pow_of_two(sizeof(struct ttm_tt)) +
+ PAGE_ALIGN(size);
+}
+
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
@@ -127,8 +138,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@ -175,8 +185,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
{
size_t acc_size;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@ -404,7 +413,10 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
+ bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
uint32_t mapping_flags;
+ uint64_t pte_flags;
+ bool snoop = false;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
@@ -425,12 +437,41 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
+ case CHIP_ALDEBARAN:
+ if (coherent && uncached) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
+ snoop = true;
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev) {
+ mapping_flags |= AMDGPU_VM_MTYPE_RW;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ snoop = true;
+ } else {
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ snoop = true;
+ }
+ } else {
+ snoop = true;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ /* system memory uses NC on A+A */
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ else
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+
+ return pte_flags;
}
/* add_bo_to_vm - Add a BO to a VM
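
The CHIP_ALDEBARAN branch above is essentially a small policy table: the MTYPE and the SNOOPED bit depend on the coherent/uncached allocation flags, on whether the BO is VRAM on the local GPU or a remote XGMI peer, and on whether the GPU is CPU-connected (A+A). The stand-alone sketch below restates only that decision logic with plain booleans; the names are simplified stand-ins for the AMDGPU_VM_MTYPE_* and AMDGPU_PTE_SNOOPED flags.

#include <stdio.h>
#include <stdbool.h>

enum mtype { MTYPE_NC, MTYPE_UC, MTYPE_RW };

struct pte_policy {
	enum mtype mtype;
	bool snoop;
};

/* Restates the CHIP_ALDEBARAN branch of get_pte_flags() with plain booleans.
 * "local" means the BO lives in this GPU's own VRAM, "same_hive" that it is
 * VRAM on another GPU in the same XGMI hive, "a_plus_a" that the GPU is
 * CPU-connected (connected_to_cpu). */
static struct pte_policy aldebaran_pte_policy(bool coherent, bool uncached,
					      bool vram, bool local,
					      bool same_hive, bool a_plus_a)
{
	struct pte_policy p = { .mtype = MTYPE_NC, .snoop = false };

	if (coherent && uncached) {
		p.snoop = a_plus_a || !vram;
		p.mtype = MTYPE_UC;
	} else if (vram) {
		if (local) {
			p.mtype = MTYPE_RW;
			p.snoop = a_plus_a;
		} else {
			p.mtype = MTYPE_NC;
			p.snoop = same_hive;
		}
	} else {
		p.snoop = true;
		p.mtype = (!a_plus_a && coherent) ? MTYPE_UC : MTYPE_NC;
	}
	return p;
}

int main(void)
{
	/* Example: coherent system memory on an A+A system maps NC + snooped. */
	struct pte_policy p = aldebaran_pte_policy(true, false, false, false,
						   false, true);

	printf("mtype=%d snoop=%d\n", p.mtype, p.snoop);
	return 0;
}
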
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 86add0f4ea4d..494b2e1717d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1232,157 +1232,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 3:
- case 4:
- args.v3.ucVoltageType = 0;
- args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
- args.v3.usVoltageLevel = 0;
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
- u8 frev, crev;
- u16 data_offset, size;
- int i, j;
- ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
- u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
- *vddc = 0;
- *vddci = 0;
-
- if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset))
- return -EINVAL;
-
- profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
- (adev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- return -EINVAL;
- case 2:
- switch (crev) {
- case 1:
- if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
- return -EINVAL;
- leakage_bin = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usLeakageBinArrayOffset));
- vddc_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
- vddc_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
- vddci_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
- vddci_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
- if (profile->ucElbVDDC_Num > 0) {
- for (i = 0; i < profile->ucElbVDDC_Num; i++) {
- if (vddc_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- if (profile->ucElbVDDCI_Num > 0) {
- for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
- if (vddci_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-union get_voltage_info {
- struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
- struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage)
-{
- int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
- u32 entry_id;
- u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
- union get_voltage_info args;
-
- for (entry_id = 0; entry_id < count; entry_id++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
- virtual_voltage_id)
- break;
- }
-
- if (entry_id >= count)
- return -EINVAL;
-
- args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
- args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
- args.in.ulSCLKFreq =
- cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
- return 0;
-}
-
union voltage_object_info {
struct _ATOM_VOLTAGE_OBJECT_INFO v1;
struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
@@ -1905,40 +1754,6 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = drm_to_adev(info->dev);
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = drm_to_adev(info->dev);
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1947,7 +1762,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_version);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
@@ -1998,15 +1813,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
atom_card_info->dev = adev_to_drm(adev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
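
With the I/O-port accessors removed, the ATOM interpreter's card_info is always wired to the MMIO register callbacks and there is no longer an init-time fallback decision. A simplified model of that wiring follows; the types and callback signatures here are stand-ins, not the real definitions from atom.h.

#include <stdint.h>
#include <stdio.h>

/* Stand-in register file; the kernel reads and writes real MMIO here. */
static uint32_t fake_regs[256];

static uint32_t mmio_read(void *ctx, uint32_t reg)
{
	(void)ctx;
	return fake_regs[reg % 256];
}

static void mmio_write(void *ctx, uint32_t reg, uint32_t val)
{
	(void)ctx;
	fake_regs[reg % 256] = val;
}

/* Simplified card_info: after this change every accessor pair points at the
 * same MMIO helpers; no I/O-port variant is registered any more. */
struct card_info {
	void *ctx;
	uint32_t (*reg_read)(void *ctx, uint32_t reg);
	void (*reg_write)(void *ctx, uint32_t reg, uint32_t val);
};

int main(void)
{
	struct card_info info = {
		.ctx = NULL,
		.reg_read = mmio_read,
		.reg_write = mmio_write,
	};

	info.reg_write(info.ctx, 0x10, 0xdeadbeef);
	printf("reg 0x10 = 0x%08x\n", (unsigned int)info.reg_read(info.ctx, 0x10));
	return 0;
}
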
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 1321ec09c734..8cc0222dba19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -168,18 +168,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id);
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id);
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage);
-
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
u8 voltage_type, u8 voltage_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 6107ac91db25..60716b35444b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -117,12 +117,15 @@ union igp_info {
union umc_info {
struct atom_umc_info_v3_1 v31;
+ struct atom_umc_info_v3_2 v32;
+ struct atom_umc_info_v3_3 v33;
};
union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
struct atom_vram_info_header_v2_5 v25;
+ struct atom_vram_info_header_v2_6 v26;
};
union vram_module {
@@ -164,6 +167,7 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
vram_type = AMDGPU_VRAM_TYPE_GDDR5;
break;
case ATOM_DGPU_VRAM_TYPE_HBM2:
+ case ATOM_DGPU_VRAM_TYPE_HBM2E:
vram_type = AMDGPU_VRAM_TYPE_HBM;
break;
case ATOM_DGPU_VRAM_TYPE_GDDR6:
@@ -315,6 +319,26 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v26.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
return -EINVAL;
}
@@ -337,19 +361,39 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
union umc_info *umc_info;
u8 frev, crev;
bool ecc_default_enabled = false;
+ u8 umc_config;
+ u32 umc_config1;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
- /* support umc_info 3.1+ */
- if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ if (frev == 3) {
umc_info = (union umc_info *)
(mode_info->atom_context->bios + data_offset);
- ecc_default_enabled =
- (le32_to_cpu(umc_info->v31.umc_config) &
- UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ switch (crev) {
+ case 1:
+ umc_config = le32_to_cpu(umc_info->v31.umc_config);
+ ecc_default_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 2:
+ umc_config = le32_to_cpu(umc_info->v32.umc_config);
+ ecc_default_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 3:
+ umc_config = le32_to_cpu(umc_info->v33.umc_config);
+ umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
+ ecc_default_enabled =
+ ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
}
}
@@ -479,7 +523,8 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
}
union gfx_info {
- struct atom_gfx_info_v2_4 v24;
+ struct atom_gfx_info_v2_4 v24;
+ struct atom_gfx_info_v2_7 v27;
};
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
@@ -514,6 +559,22 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
+ case 7:
+ adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
+ adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
+ adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
+ adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
+ adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
+ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
+ return 0;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index d9b35df33806..313517f7cf10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -85,6 +85,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, &bp, &sobj);
if (r) {
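[Editor's note, not part of the patch] The new bp.bo_ptr_size field tells amdgpu_bo_create() how large an object to allocate, so callers that embed struct amdgpu_bo at the start of a larger structure can get the whole thing in one allocation; plain users such as this benchmark simply pass sizeof(struct amdgpu_bo). A hedged sketch of the embedding case — the wrapper type and helper below are illustrative, not definitions from the driver:

/* Illustrative only: a wrapper type embedding the base BO. */
struct example_bo_wrapper {
        struct amdgpu_bo        bo;             /* must be the first member */
        u64                     extra_state;    /* hypothetical extra data */
};

static int example_create_wrapped_bo(struct amdgpu_device *adev,
                                     struct amdgpu_bo_param *bp,
                                     struct example_bo_wrapper **out)
{
        struct amdgpu_bo *bo;
        int r;

        /* Ask amdgpu_bo_create() to allocate the outer object's size. */
        bp->bo_ptr_size = sizeof(struct example_bo_wrapper);
        r = amdgpu_bo_create(adev, bp, &bo);
        if (r)
                return r;

        *out = container_of(bo, struct example_bo_wrapper, bo);
        return 0;
}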
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index cfb1a9a04477..27b19503773b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -97,6 +97,10 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
if (amdgpu_device_need_post(adev))
return false;
+ /* FB BAR not enabled */
+ if (pci_resource_len(adev->pdev, 0) == 0)
+ return false;
+
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size);
@@ -316,7 +320,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
- DRM_ERROR("Unable to allocate bios\n");
+ dev_err(adev->dev, "Unable to allocate bios\n");
return false;
}
@@ -364,7 +368,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n");
return false;
}
@@ -377,13 +381,13 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image header truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n");
return false;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n");
return false;
}
@@ -406,7 +410,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
}
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n");
return false;
}
#else
@@ -453,7 +457,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- DRM_ERROR("Unable to locate a BIOS ROM\n");
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
success:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3e240b952e79..b5c766998045 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -117,7 +117,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
if (cs->in.num_chunks == 0)
return 0;
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
@@ -144,7 +144,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
}
p->nchunks = cs->in.num_chunks;
- p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+ p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
@@ -238,7 +238,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
if (p->uf_entry.tv.bo)
p->job->uf_addr = uf_offset;
- kfree(chunk_array);
+ kvfree(chunk_array);
/* Use this opportunity to fill in task info for the vm */
amdgpu_vm_set_task_info(vm);
@@ -250,11 +250,11 @@ free_all_kdata:
free_partial_kdata:
for (; i >= 0; i--)
kvfree(p->chunks[i].kdata);
- kfree(p->chunks);
+ kvfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
free_chunk:
- kfree(chunk_array);
+ kvfree(chunk_array);
return ret;
}
@@ -559,7 +559,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
- DRM_ERROR("calloc failure\n");
+ DRM_ERROR("kvmalloc_array failure\n");
return -ENOMEM;
}
@@ -706,7 +706,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
for (i = 0; i < parser->nchunks; i++)
kvfree(parser->chunks[i].kdata);
- kfree(parser->chunks);
+ kvfree(parser->chunks);
if (parser->job)
amdgpu_job_free(parser->job);
if (parser->uf_entry.tv.bo) {
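[Editor's note, not part of the patch] The kmalloc_array()/kfree() pairs above become kvmalloc_array()/kvfree(): the chunk counts come from userspace, so a large submission can exceed what a physically contiguous kmalloc can provide, and kvmalloc_array() transparently falls back to vmalloc in that case. Every such allocation must then be released with kvfree(), which is why the error-path kfree() calls change too. A minimal sketch of the pairing (helper names are illustrative):

#include <linux/types.h>
#include <linux/mm.h>   /* kvmalloc_array(), kvfree() */

/* Illustrative helpers showing only the allocation/free pairing. */
static u64 *example_alloc_chunk_ids(u32 nchunks)
{
        /* May be kmalloc- or vmalloc-backed depending on size and fragmentation. */
        return kvmalloc_array(nchunks, sizeof(u64), GFP_KERNEL);
}

static void example_free_chunk_ids(u64 *ids)
{
        kvfree(ids);    /* handles both backings; plain kfree() would not */
}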
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 43059ead733b..bcaf271b39bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,7 +28,6 @@
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
-#include <drm/drm_debugfs.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -38,45 +37,6 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
-/**
- * amdgpu_debugfs_add_files - Add simple debugfs entries
- *
- * @adev: Device to attach debugfs entries to
- * @files: Array of function callbacks that respond to reads
- * @nfiles: Number of callbacks to register
- *
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev_to_drm(adev)->primary->debugfs_root,
- adev_to_drm(adev)->primary);
-#endif
- return 0;
-}
-
int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
@@ -1228,22 +1188,20 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
}
return 0;
}
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
int r = 0, i;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
@@ -1285,30 +1243,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
- seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ *val = amdgpu_bo_evict_vram(adev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1316,11 +1263,11 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
+
+static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
struct ttm_resource_manager *man;
int r;
@@ -1331,8 +1278,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
}
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
- seq_printf(m, "(%d)\n", r);
+ *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1340,10 +1286,11 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
+
+static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -1369,13 +1316,12 @@ static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
return r;
}
-static const struct drm_info_list amdgpu_debugfs_list[] = {
- {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
- {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
- {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
- {"amdgpu_vm_info", &amdgpu_debugfs_vm_info},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
+ NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
+ NULL, "%lld\n");
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
struct dma_fence **fences)
@@ -1586,71 +1532,50 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
+ struct dentry *ent;
int r, i;
- adev->debugfs_preempt =
- debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev_to_drm(adev)->primary->debugfs_root, adev,
- &fops_ib_preempt);
- if (!(adev->debugfs_preempt)) {
+
+
+ ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
+ &fops_ib_preempt);
+ if (!ent) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
}
- adev->smu.debugfs_sclk =
- debugfs_create_file("amdgpu_force_sclk", 0200,
- adev_to_drm(adev)->primary->debugfs_root, adev,
- &fops_sclk_set);
- if (!(adev->smu.debugfs_sclk)) {
+ ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
+ &fops_sclk_set);
+ if (!ent) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
return -EIO;
}
/* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
-
- r = amdgpu_debugfs_pm_init(adev);
- if (r) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return r;
- }
-
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
-
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ amdgpu_ttm_debugfs_init(adev);
+ amdgpu_debugfs_pm_init(adev);
+ amdgpu_debugfs_sa_init(adev);
+ amdgpu_debugfs_fence_init(adev);
+ amdgpu_debugfs_gem_init(adev);
r = amdgpu_debugfs_regs_init(adev);
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ amdgpu_debugfs_firmware_init(adev);
#if defined(CONFIG_DRM_AMD_DC)
- if (amdgpu_device_has_dc_support(adev)) {
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
- }
+ if (amdgpu_device_has_dc_support(adev))
+ dtn_debugfs_init(adev);
#endif
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1665,17 +1590,26 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
}
amdgpu_ras_debugfs_create_all(adev);
-
amdgpu_debugfs_autodump_init(adev);
-
amdgpu_rap_debugfs_init(adev);
-
amdgpu_securedisplay_debugfs_init(adev);
-
amdgpu_fw_attestation_debugfs_init(adev);
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
- ARRAY_SIZE(amdgpu_debugfs_list));
+ debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+ &amdgpu_evict_vram_fops);
+ debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+ &amdgpu_evict_gtt_fops);
+ debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+ &amdgpu_debugfs_test_ib_fops);
+ debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
+ &amdgpu_debugfs_vm_info_fops);
+
+ adev->debugfs_vbios_blob.data = adev->bios;
+ adev->debugfs_vbios_blob.size = adev->bios_size;
+ debugfs_create_blob("amdgpu_vbios", 0444, root,
+ &adev->debugfs_vbios_blob);
+
+ return 0;
}
#else
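[Editor's note, not part of the patch] The vbios dump switches from a seq_file show function to debugfs_create_blob(), which exposes a read-only binary file backed directly by a debugfs_blob_wrapper. The wrapper only records a pointer and a length, so both it and the data it points at (adev->bios) must outlive the debugfs entry, which is why the wrapper is embedded in amdgpu_device rather than kept on the stack. For reference, the core API looks roughly like this (paraphrased from include/linux/debugfs.h):

struct debugfs_blob_wrapper {
        void            *data;
        unsigned long   size;
};

struct dentry *debugfs_create_blob(const char *name, umode_t mode,
                                   struct dentry *parent,
                                   struct debugfs_blob_wrapper *blob);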
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 2803884d338d..141a8474e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -26,11 +26,6 @@
/*
* Debugfs
*/
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
struct amdgpu_autodump {
struct completion dumping;
struct wait_queue_head gpu_hang;
@@ -39,10 +34,7 @@ struct amdgpu_autodump {
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a5a8ff5d362..b4ad1c055c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
+#include "amdgpu_reset.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
@@ -110,6 +111,7 @@ const char *amdgpu_asic_name[] = {
"RAVEN",
"ARCTURUS",
"RENOIR",
+ "ALDEBARAN",
"NAVI10",
"NAVI14",
"NAVI12",
@@ -136,7 +138,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+ return sysfs_emit(buf, "%llu\n", cnt);
}
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
@@ -160,7 +162,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+ return sysfs_emit(buf, "%s\n", adev->product_name);
}
static DEVICE_ATTR(product_name, S_IRUGO,
@@ -182,7 +184,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+ return sysfs_emit(buf, "%s\n", adev->product_number);
}
static DEVICE_ATTR(product_number, S_IRUGO,
@@ -204,25 +206,25 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+ return sysfs_emit(buf, "%s\n", adev->serial);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
- * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
* @dev: drm_device pointer
*
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_atpx(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev->flags & AMD_IS_PX)
+ if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
}
@@ -232,14 +234,15 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev)
*
* @dev: drm_device pointer
*
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev->has_pr3)
+ if (adev->has_pr3 ||
+ ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
return false;
}
@@ -325,6 +328,35 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
/*
* register access helper functions.
*/
+
+/* Check if hw access should be skipped because of hotplug or device error */
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
+{
+ if (adev->in_pci_err_recovery)
+ return true;
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * This is a bit complicated to understand, so worth a comment. What we assert
+ * here is that the GPU reset is not running on another thread in parallel.
+ *
+ * For this we trylock the read side of the reset semaphore, if that succeeds
+ * we know that the reset is not running in parallel.
+ *
+ * If the trylock fails we assert that we are either already holding the read
+ * side of the lock or are the reset thread itself and hold the write side of
+ * the lock.
+ */
+ if (in_task()) {
+ if (down_read_trylock(&adev->reset_sem))
+ up_read(&adev->reset_sem);
+ else
+ lockdep_assert_held(&adev->reset_sem);
+ }
+#endif
+ return false;
+}
+
/**
* amdgpu_device_rreg - read a memory mapped IO or indirect register
*
@@ -339,7 +371,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
{
uint32_t ret;
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if ((reg * 4) < adev->rmmio_size) {
@@ -376,7 +408,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
*/
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (offset < adev->rmmio_size)
@@ -401,7 +433,7 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
*/
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (offset < adev->rmmio_size)
@@ -424,7 +456,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if ((reg * 4) < adev->rmmio_size) {
@@ -451,63 +483,20 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (amdgpu_sriov_fullaccess(adev) &&
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
}
/**
- * amdgpu_io_rreg - read an IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- *
- * Returns the 32 bit value from the offset specified.
- */
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
-{
- if (adev->in_pci_err_recovery)
- return 0;
-
- if ((reg * 4) < adev->rio_mem_size)
- return ioread32(adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- return ioread32(adev->rio_mem + (mmMM_DATA * 4));
- }
-}
-
-/**
- * amdgpu_io_wreg - write to an IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- * @v: 32 bit value to write to the register
- *
- * Writes the value specified to the offset specified.
- */
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- if (adev->in_pci_err_recovery)
- return;
-
- if ((reg * 4) < adev->rio_mem_size)
- iowrite32(v, adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
- }
-}
-
-/**
* amdgpu_mm_rdoorbell - read a doorbell dword
*
* @adev: amdgpu_device pointer
@@ -518,7 +507,7 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
@@ -541,7 +530,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
@@ -562,7 +551,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
@@ -585,7 +574,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
@@ -1223,6 +1212,10 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
}
+ /* Don't post if we need to reset whole hive on init */
+ if (adev->gmc.xgmi.pending_reset)
+ return false;
+
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
@@ -1429,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -1810,6 +1803,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_VEGA20:
+ case CHIP_ALDEBARAN:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -2010,6 +2004,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_RV;
else
@@ -2045,6 +2040,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -2083,6 +2080,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
return r;
}
+
+ /* get pf2vf msg info at its earliest time */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
}
}
@@ -2149,6 +2151,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+
/* no need to do the fw loading again if already done*/
if (adev->ip_blocks[i].status.hw == true)
break;
@@ -2289,7 +2294,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
- amdgpu_amdkfd_device_init(adev);
+
+ /* Don't init kfd if the whole hive needs to be reset during init */
+ if (!adev->gmc.xgmi.pending_reset)
+ amdgpu_amdkfd_device_init(adev);
amdgpu_fru_get_product_info(adev);
@@ -2359,8 +2367,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* Returns 0 on success, negative error code on failure.
*/
-static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
- enum amd_clockgating_state state)
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
{
int i, j, r;
@@ -2395,7 +2403,8 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state)
{
int i, j, r;
@@ -2506,6 +2515,11 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ /* For XGMI + passthrough configuration on arcturus, enable light SBR */
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ amdgpu_passthrough(adev) &&
+ adev->gmc.xgmi.num_physical_nodes > 1)
+ smu_set_light_sbr(&adev->smu, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
@@ -2743,6 +2757,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
continue;
}
+ /* skip unnecessary suspend if we have not initialized them yet */
+ if (adev->gmc.xgmi.pending_reset &&
+ !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
+
/* skip suspend of gfx and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
@@ -2772,7 +2796,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
}
}
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -2793,8 +2816,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_full_gpu(adev, false);
+ }
r = amdgpu_device_ip_suspend_phase1(adev);
if (r)
@@ -3117,8 +3142,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
- if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
- adev->mmhub.funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -3228,7 +3254,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
- bool atpx = false;
+ bool px = false;
u32 max_MBps;
adev->shutdown = false;
@@ -3276,7 +3302,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
@@ -3309,6 +3334,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
+ INIT_LIST_HEAD(&adev->reset_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -3347,17 +3374,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* io port mapping */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
- adev->rio_mem_size = pci_resource_len(adev->pdev, i);
- adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
- break;
- }
- }
- if (adev->rio_mem == NULL)
- DRM_INFO("PCI I/O BAR is not found.\n");
-
/* enable PCIE atomic ops */
r = pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
@@ -3400,16 +3416,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_atpx(ddev))
- atpx = true;
- if (amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- !pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_device_supports_px(ddev)) {
+ px = true;
vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, atpx);
- if (atpx)
+ &amdgpu_switcheroo_ops, px);
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+ }
if (amdgpu_emu_mode == 1) {
/* post the asic on emulation mode */
@@ -3417,6 +3429,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto fence_driver_init;
}
+ amdgpu_reset_init(adev);
+
/* detect if we are with an SRIOV vbios */
amdgpu_device_detect_sriov_bios(adev);
@@ -3424,10 +3438,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* E.g., driver was not cleanly unloaded previously, etc.
*/
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
- r = amdgpu_asic_reset(adev);
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
+ if (adev->gmc.xgmi.num_physical_nodes) {
+ dev_info(adev->dev, "Pending hive reset.\n");
+ adev->gmc.xgmi.pending_reset = true;
+ /* Only need to init the blocks necessary for SMU to handle the reset */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
+ DRM_DEBUG("IP %s disabled for hw_init.\n",
+ adev->ip_blocks[i].version->funcs->name);
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+ } else {
+ r = amdgpu_asic_reset(adev);
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
+ }
}
}
@@ -3493,11 +3525,11 @@ fence_driver_init:
adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
adev->virt.ops = NULL;
r = -EAGAIN;
- goto failed;
+ goto release_ras_con;
}
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- goto failed;
+ goto release_ras_con;
}
dev_info(adev->dev,
@@ -3558,19 +3590,19 @@ fence_driver_init:
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_device_ip_late_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
- goto failed;
+ if (!adev->gmc.xgmi.pending_reset) {
+ r = amdgpu_device_ip_late_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ goto release_ras_con;
+ }
+ /* must succeed. */
+ amdgpu_ras_resume(adev);
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
}
- /* must succeed. */
- amdgpu_ras_resume(adev);
-
- queue_delayed_work(system_wq, &adev->delayed_init_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
-
if (amdgpu_sriov_vf(adev))
flush_delayed_work(&adev->delayed_init_work);
@@ -3587,11 +3619,18 @@ fence_driver_init:
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(pdev);
+ if (adev->gmc.xgmi.pending_reset)
+ queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
return 0;
+release_ras_con:
+ amdgpu_release_ras_context(adev);
+
failed:
amdgpu_vf_error_trans_all(adev);
- if (atpx)
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
@@ -3613,6 +3652,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+ ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
adev->shutdown = true;
kfree(adev->pci_state);
@@ -3641,6 +3681,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
+
+ amdgpu_reset_fini(adev);
+
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -3650,18 +3693,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- !pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_device_supports_px(adev_to_drm(adev))) {
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ }
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
- if (adev->rio_mem)
- pci_iounmap(adev->pdev, adev->rio_mem);
- adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
@@ -4079,11 +4116,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_amdkfd_post_reset(adev);
error:
- amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
amdgpu_inc_vram_lost(adev);
r = amdgpu_device_recover_vram(adev);
}
+ amdgpu_virt_release_full_gpu(adev, true);
return r;
}
@@ -4160,6 +4197,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
+ case CHIP_ALDEBARAN:
break;
default:
goto disabled;
@@ -4173,15 +4212,60 @@ disabled:
return false;
}
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU mode1 reset\n");
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+ } else {
+ dev_info(adev->dev, "GPU psp mode1 reset\n");
+ ret = psp_gpu_reset(adev);
+ }
+
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
+
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ return ret;
+}
-static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- bool *need_full_reset_arg)
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
- bool need_full_reset = *need_full_reset_arg;
+ struct amdgpu_job *job = NULL;
+ bool need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- amdgpu_debugfs_wait_dump(adev);
+ if (reset_context->reset_req_dev == adev)
+ job = reset_context->job;
+
+ /* no need to dump if device is not in good state during probe period */
+ if (!adev->gmc.xgmi.pending_reset)
+ amdgpu_debugfs_wait_dump(adev);
if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */
@@ -4202,6 +4286,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if(job)
drm_sched_increase_karma(&job->base);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -ENOSYS)
+ r = 0;
+ else
+ return r;
+
/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
if (!amdgpu_sriov_vf(adev)) {
@@ -4220,30 +4311,47 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (need_full_reset)
r = amdgpu_device_ip_suspend(adev);
-
- *need_full_reset_arg = need_full_reset;
+ if (need_full_reset)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET,
+ &reset_context->flags);
}
return r;
}
-static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
- struct list_head *device_list_handle,
- bool *need_full_reset_arg,
- bool skip_hw_reset)
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *tmp_adev = NULL;
- bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -ENOSYS)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
/*
- * ASIC reset has to be done on all HGMI hive nodes ASAP
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
*/
if (!skip_hw_reset && need_full_reset) {
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ tmp_adev->gmc.xgmi.pending_reset = false;
if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
@@ -4258,8 +4366,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->xgmi_reset_work);
r = tmp_adev->asic_reset_res;
@@ -4271,22 +4378,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
if (!r && amdgpu_ras_intr_triggered()) {
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (tmp_adev->mmhub.funcs &&
- tmp_adev->mmhub.funcs->reset_ras_error_count)
- tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ if (tmp_adev->mmhub.ras_funcs &&
+ tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
+ tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
}
amdgpu_ras_intr_cleared();
}
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
- if (amdgpu_device_asic_init(tmp_adev))
+ r = amdgpu_device_asic_init(tmp_adev);
+ if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
-
- if (!r) {
+ } else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
@@ -4319,6 +4426,10 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
*/
amdgpu_register_gpu_instance(tmp_adev);
+ if (!reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(tmp_adev);
+
r = amdgpu_device_ip_late_init(tmp_adev);
if (r)
goto out;
@@ -4335,7 +4446,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
* bad_page_threshold value to fix this once
* probing driver again.
*/
- if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
+ if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
} else {
@@ -4344,8 +4455,10 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
/* Update PSP FW topology after reset */
- if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
- r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(
+ reset_context->hive, tmp_adev);
}
}
@@ -4369,7 +4482,10 @@ out:
}
end:
- *need_full_reset_arg = need_full_reset;
+ if (need_full_reset)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
return r;
}
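[Editor's note, not part of the patch] Both reset helpers now exchange state through a single amdgpu_reset_context instead of a job pointer plus boolean flags, which lets the new per-ASIC reset handlers (see amdgpu_reset.c/h added in this series) and the legacy path share one interface. Based on how the context is used in this file, it roughly has the shape sketched below; the authoritative definition lives in the new amdgpu_reset.h:

/* Approximate shape, inferred from its use in this diff. */
struct amdgpu_reset_context {
        enum amd_reset_method   method;         /* AMD_RESET_METHOD_NONE, mode1, ... */
        struct amdgpu_device    *reset_req_dev; /* device that requested the reset */
        struct amdgpu_job       *job;           /* guilty job, if any */
        struct amdgpu_hive_info *hive;          /* XGMI hive, if any */
        unsigned long           flags;          /* AMDGPU_NEED_FULL_RESET, AMDGPU_SKIP_HW_RESET */
};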
@@ -4385,7 +4501,6 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
down_write(&adev->reset_sem);
}
- atomic_inc(&adev->gpu_reset_counter);
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -4507,6 +4622,74 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_recheck_guilty_jobs(
+ struct amdgpu_device *adev, struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ int i, r = 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ int ret = 0;
+ struct drm_sched_job *s_job;
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ s_job = list_first_entry_or_null(&ring->sched.pending_list,
+ struct drm_sched_job, list);
+ if (s_job == NULL)
+ continue;
+
+ /* clear the job's guilty flag and depend on the following step to decide the real one */
+ drm_sched_reset_karma(s_job);
+ drm_sched_resubmit_jobs_ext(&ring->sched, 1);
+
+ ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
+ if (ret == 0) { /* timeout */
+ DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
+ ring->sched.name, s_job->id);
+
+ /* set guilty */
+ drm_sched_increase_karma(s_job);
+retry:
+ /* do hw reset */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_device_reset_sriov(adev, false);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ clear_bit(AMDGPU_SKIP_HW_RESET,
+ &reset_context->flags);
+ r = amdgpu_do_asic_reset(device_list_handle,
+ reset_context);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /*
+ * add reset counter so that the following
+ * resubmitted job could flush vmid
+ */
+ atomic_inc(&adev->gpu_reset_counter);
+ continue;
+ }
+
+ /* got the hw fence, signal finished fence */
+ atomic_dec(ring->sched.score);
+ dma_fence_get(&s_job->s_fence->finished);
+ dma_fence_signal(&s_job->s_fence->finished);
+ dma_fence_put(&s_job->s_fence->finished);
+
+ /* remove node from list and free the job */
+ spin_lock(&ring->sched.job_list_lock);
+ list_del_init(&s_job->list);
+ spin_unlock(&ring->sched.job_list_lock);
+ ring->sched.ops->free_job(s_job);
+ }
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -4522,13 +4705,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
- bool need_full_reset = false;
bool job_signaled = false;
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
+ int tmp_vram_lost_counter;
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
/*
* Special case: RAS triggered and full reset isn't supported
@@ -4569,6 +4755,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
mutex_lock(&hive->hive_lock);
}
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.job = job;
+ reset_context.hive = hive;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
/*
* lock the device before we try to operate the linked list
* if didn't get the device lock, don't touch the linked list since
@@ -4592,16 +4784,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
INIT_LIST_HEAD(&device_list);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
- list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
- device_list_handle = &hive->device_list;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
} else {
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ list_add_tail(&adev->reset_list, &device_list);
device_list_handle = &device_list;
}
/* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/*
* Try to put the audio codec into suspend state
* before gpu reset started.
@@ -4646,6 +4840,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (need_emergency_restart)
amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
+ atomic_inc(&tmp_adev->gpu_reset_counter);
}
if (need_emergency_restart)
@@ -4665,10 +4860,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- r = amdgpu_device_pre_asic_reset(tmp_adev,
- (tmp_adev == adev) ? job : NULL,
- &need_full_reset);
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -4677,6 +4870,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
}
}
+ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* TODO Implement XGMI hive reset logic for SRIOV */
if (amdgpu_sriov_vf(adev)) {
@@ -4684,7 +4878,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
+ r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
if (r && r == -EAGAIN)
goto retry;
}
@@ -4692,7 +4886,19 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
skip_hw_reset:
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+
+ /*
+ * Sometimes a later bad compute job can block a good gfx job, since the
+ * gfx and compute rings share internal GC hardware. We therefore add an
+ * additional guilty-job recheck step to find the real offender: it
+ * synchronously resubmits and waits for the first pending job to signal,
+ * and if that wait times out we identify it as the real guilty job.
+ */
+ if (amdgpu_gpu_recovery == 2 &&
+ !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
+ amdgpu_device_recheck_guilty_jobs(
+ tmp_adev, device_list_handle, &reset_context);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -4723,10 +4929,17 @@ skip_hw_reset:
}
skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- /*unlock kfd: SRIOV would do it separately */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
+
+ /* kfd_post_reset will do nothing if kfd device is not initialized,
+ * need to bring up kfd here if it was not initialized before
+ */
+ if (!adev->kfd.init_complete)
+ amdgpu_amdkfd_device_init(adev);
+
if (audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
@@ -4988,6 +5201,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
drm_sched_stop(&ring->sched, NULL);
}
+ atomic_inc(&adev->gpu_reset_counter);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -5029,14 +5243,16 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
int r, i;
- bool need_full_reset = true;
+ struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
DRM_INFO("PCI error: slot reset callback!!\n");
+ memset(&reset_context, 0, sizeof(reset_context));
+
INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ list_add_tail(&adev->reset_list, &device_list);
/* wait for asic to come out of reset */
msleep(500);
@@ -5057,13 +5273,18 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
goto out;
}
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+
adev->in_pci_err_recovery = true;
- r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ r = amdgpu_device_pre_asic_reset(adev, &reset_context);
adev->in_pci_err_recovery = false;
if (r)
goto out;
- r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+ r = amdgpu_do_asic_reset(&device_list, &reset_context);
out:
if (!r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f753e04fee99..9a2f811450ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -870,17 +870,62 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
return r;
}
+int amdgpu_display_gem_fb_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_err(dev, "Failed to init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
+int amdgpu_display_gem_fb_verify_and_init(
+ struct drm_device *dev, struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+
+ /* Verify that bo size can fit the fb size. */
+ ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
+ &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret, i;
- rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto fail;
/*
* This needs to happen before modifier conversion as that might change
@@ -891,13 +936,13 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
i, mode_cmd->handles[0], mode_cmd->handles[i]);
ret = -EINVAL;
- goto fail;
+ return ret;
}
}
ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
if (ret)
- goto fail;
+ return ret;
if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
@@ -905,20 +950,17 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret) {
drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
rfb->tiling_flags);
- goto fail;
+ return ret;
}
}
for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ drm_gem_object_get(rfb->base.obj[0]);
+ drm_gem_object_put(rfb->base.obj[i]);
rfb->base.obj[i] = rfb->base.obj[0];
- drm_gem_object_get(rfb->base.obj[i]);
}
return 0;
-
-fail:
- rfb->base.obj[0] = NULL;
- return ret;
}
struct drm_framebuffer *
@@ -953,13 +995,15 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+ ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
+ mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
+ drm_gem_object_put(obj);
return &amdgpu_fb->base;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 47e0b48dc26f..e0c4f7c7f1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -321,17 +321,12 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct dma_buf *dma_buf = attach->dmabuf;
- struct drm_gem_object *obj = dma_buf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
if (sgt->sgl->page_link) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
} else {
- amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+ amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
}
}
@@ -434,22 +429,22 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
struct drm_gem_object *gobj;
+ struct amdgpu_bo *bo;
+ uint64_t flags = 0;
int ret;
- memset(&bp, 0, sizeof(bp));
- bp.size = dma_buf->size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_CPU;
- bp.flags = 0;
- bp.type = ttm_bo_type_sg;
- bp.resv = resv;
dma_resv_lock(resv, NULL);
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
+
+ flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ }
+
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_CPU,
- 0, ttm_bo_type_sg, resv, &gobj);
+ AMDGPU_GEM_DOMAIN_CPU, flags,
+ ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e92e7dea71da..d8f131ed10cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -36,6 +36,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
+#include <linux/suspend.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -45,6 +46,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_reset.h"
/*
* KMS wrapper.
@@ -90,9 +93,10 @@
* - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
+ * - 3.41.0 - Add video codec query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 40
+#define KMS_DRIVER_MINOR 41
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -145,6 +149,7 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode;
uint amdgpu_smu_memory_pool_size;
+int amdgpu_smu_pptable_id = -1;
/*
* FBC (bit 0) disabled by default
* MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
@@ -162,16 +167,26 @@ int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
-int amdgpu_tmz;
+int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
+static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+ .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
+ mgpu_info.delayed_reset_work,
+ amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
+struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
+ .timeout_fatal_disable = false,
+ .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */
+};
/**
* DOC: vramlimit (int)
@@ -502,7 +517,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* DOC: gpu_recovery (int)
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
@@ -528,6 +543,20 @@ MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff),
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
/**
+ * DOC: timeout_fatal_disable (bool)
+ * Disable Watchdog timeout fatal error event
+ */
+MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
+module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);
+
+/**
+ * DOC: timeout_period (uint)
+ * Modify the watchdog timeout max_cycles as (1 << period)
+ */
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23 (default); max. timeout cycles = 1 << period)");
+module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
+
+/**
* DOC: si_support (int)
* Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
* set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
@@ -748,6 +777,13 @@ bool no_system_mem_limit;
module_param(no_system_mem_limit, bool, 0644);
MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+/**
+ * DOC: no_queue_eviction_on_vm_fault (int)
+ * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
+ */
+int amdgpu_no_queue_eviction_on_vm_fault = 0;
+MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
+module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -792,10 +828,21 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
*
* The default value: 0 (off). TODO: change to auto till it is completed.
*/
-MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization that adjusts front porch timing to achieve a seamless mode change
+ * when setting a FreeSync-supported mode for which a full modeset is not needed.
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
*/
@@ -815,6 +862,15 @@ module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+/**
+ * DOC: smu_pptable_id (int)
+ * Used to override the pptable id. id = 0 uses the VBIOS pptable;
+ * id > 0 uses the soft pptable with the specified id.
+ */
+MODULE_PARM_DESC(smu_pptable_id,
+ "specify pptable id to be used (-1 = auto (default), 0 = use pptable from vbios, > 0 = soft pptable id)");
+module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1125,6 +1181,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ /* Aldebaran */
+ {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+
{0, 0, 0}
};
@@ -1279,6 +1340,98 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
adev->mp1_state = PP_MP1_STATE_NONE;
}
+/**
+ * amdgpu_drv_delayed_reset_work_handler - work handler for reset
+ *
+ * @work: work_struct.
+ */
+static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
+{
+ struct list_head device_list;
+ struct amdgpu_device *adev;
+ int i, r;
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ mutex_lock(&mgpu_info.mutex);
+ if (mgpu_info.pending_reset == true) {
+ mutex_unlock(&mgpu_info.mutex);
+ return;
+ }
+ mgpu_info.pending_reset = true;
+ mutex_unlock(&mgpu_info.mutex);
+
+ /* Use a common context, just need to make sure full reset is done */
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ reset_context.reset_req_dev = adev;
+ r = amdgpu_device_pre_asic_reset(adev, &reset_context);
+ if (r) {
+ dev_err(adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
+ r, adev_to_drm(adev)->unique);
+ }
+ if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
+ r = -EALREADY;
+ }
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ flush_work(&adev->xgmi_reset_work);
+ adev->gmc.xgmi.pending_reset = false;
+ }
+
+ /* the reset function will rebuild the xgmi hive info, clear it now */
+ for (i = 0; i < mgpu_info.num_dgpu; i++)
+ amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
+
+ INIT_LIST_HEAD(&device_list);
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++)
+ list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
+
+ /* unregister the GPU first, reset function will add them back */
+ list_for_each_entry(adev, &device_list, reset_list)
+ amdgpu_unregister_gpu_instance(adev);
+
+ /* Use a common context, just need to make sure full reset is done */
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ r = amdgpu_do_asic_reset(&device_list, &reset_context);
+
+ if (r) {
+ DRM_ERROR("reinit gpus failure");
+ return;
+ }
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ if (!adev->kfd.init_complete)
+ amdgpu_amdkfd_device_init(adev);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+ return;
+}
+
+static int amdgpu_pmops_prepare(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ /* Return a positive number here so
+ * DPM_FLAG_SMART_SUSPEND works properly
+ */
+ if (amdgpu_device_supports_boco(drm_dev))
+ return pm_runtime_suspended(dev) &&
+ pm_suspend_via_firmware();
+
+ return 0;
+}
+
+static void amdgpu_pmops_complete(struct device *dev)
+{
+ /* nothing to do */
+}
+
static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -1364,7 +1517,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
- if (amdgpu_device_supports_atpx(drm_dev))
+ if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
ret = amdgpu_device_suspend(drm_dev, false);
@@ -1373,16 +1526,14 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
return ret;
}
- if (amdgpu_device_supports_atpx(drm_dev)) {
+ if (amdgpu_device_supports_px(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (!amdgpu_is_atpx_hybrid()) {
- amdgpu_device_cache_pci_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
- }
+ amdgpu_device_cache_pci_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_enter(drm_dev);
@@ -1401,19 +1552,17 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
- if (amdgpu_device_supports_atpx(drm_dev)) {
+ if (amdgpu_device_supports_px(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (!amdgpu_is_atpx_hybrid()) {
- pci_set_power_state(pdev, PCI_D0);
- amdgpu_device_load_pci_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- }
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
pci_set_master(pdev);
} else if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
@@ -1424,7 +1573,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- if (amdgpu_device_supports_atpx(drm_dev))
+ if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -1505,6 +1654,8 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
+ .prepare = amdgpu_pmops_prepare,
+ .complete = amdgpu_pmops_complete,
.suspend = amdgpu_pmops_suspend,
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
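The mgpu_info initializer above wires up delayed_reset_work statically with __DELAYED_WORK_INITIALIZER and later kicks it off to reset all dGPUs in one pass. A self-contained sketch of that static-initializer pattern; the demo_* names are placeholders, not part of the patch:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_reset_handler(struct work_struct *work)
{
	/* runs in process context once the delay has elapsed */
}

static struct delayed_work demo_reset_work =
	__DELAYED_WORK_INITIALIZER(demo_reset_work, demo_reset_handler, 0);

/* from probe/init code, schedule it to run ~100 ms later: */
schedule_delayed_work(&demo_reset_work, msecs_to_jiffies(100));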
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 24010cacf7d0..4c5c19820d37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -232,8 +232,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
- &mode_cmd, gobj);
+ ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb,
+ &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d56f4023ebb3..47ea46859618 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -36,8 +36,6 @@
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_debugfs.h>
-
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -441,7 +439,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* Helper function for amdgpu_fence_driver_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission)
+ unsigned num_hw_submission,
+ atomic_t *sched_score)
{
struct amdgpu_device *adev = ring->adev;
long timeout;
@@ -469,30 +468,31 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
return -ENOMEM;
/* No need to setup the GPU scheduler for rings that don't need it */
- if (!ring->no_scheduler) {
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- timeout = adev->gfx_timeout;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- timeout = adev->compute_timeout;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- timeout = adev->sdma_timeout;
- break;
- default:
- timeout = adev->video_timeout;
- break;
- }
+ if (ring->no_scheduler)
+ return 0;
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission, amdgpu_job_hang_limit,
- timeout, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
- }
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ timeout = adev->compute_timeout;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ timeout = adev->sdma_timeout;
+ break;
+ default:
+ timeout = adev->video_timeout;
+ break;
+ }
+
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission, amdgpu_job_hang_limit,
+ timeout, sched_score, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
}
return 0;
@@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_fini(&ring->sched);
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
@@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- if (!ring->no_scheduler)
- drm_sched_fini(&ring->sched);
+
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -697,11 +698,9 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
* Fence debugfs
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -746,11 +745,10 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
*
* Manually trigger a gpu reset at the next fence wait.
*/
-static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
+static int gpu_recover_get(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
int r;
r = pm_runtime_get_sync(dev->dev);
@@ -759,8 +757,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
return 0;
}
- seq_printf(m, "gpu recover\n");
- amdgpu_device_gpu_recover(adev, NULL);
+ *val = amdgpu_device_gpu_recover(adev, NULL);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -768,26 +765,24 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
- {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
+ "%lld\n");
-static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
- {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
-};
#endif
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- if (amdgpu_sriov_vf(adev))
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
- ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
- ARRAY_SIZE(amdgpu_debugfs_fence_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
+ &amdgpu_debugfs_fence_info_fops);
+
+ if (!amdgpu_sriov_vf(adev))
+ debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
+ &amdgpu_debugfs_gpu_recover_fops);
#endif
}
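The fence-info and gpu_recover debugfs files above move from the drm_info_list tables to plain debugfs attributes. A minimal sketch of the DEFINE_SHOW_ATTRIBUTE pattern they now rely on; demo_info is a placeholder name, and the macro expects a matching *_show() function and generates demo_info_fops:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;   /* data pointer passed at creation */

	seq_printf(m, "gpu reset counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_info);

/* registration from a *_debugfs_init() helper: */
debugfs_create_file("demo_info", 0444, root, adev, &demo_info_fops);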
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0db933026722..c5a9a4fb10d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = ttm_bo_glob.dummy_read_page;
+ struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
@@ -126,6 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
if (r) {
return r;
@@ -202,6 +204,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
return;
}
amdgpu_bo_unref(&adev->gart.bo);
+ adev->gart.ptr = NULL;
}
/*
@@ -236,9 +239,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- adev->gart.pages[p] = NULL;
-#endif
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -312,9 +312,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- unsigned t,p;
-#endif
int r, i;
if (!adev->gart.ready) {
@@ -322,13 +319,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return -EINVAL;
}
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++)
- adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
-#endif
-
if (!adev->gart.ptr)
return 0;
@@ -373,14 +363,6 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- /* Allocate pages table */
- adev->gart.pages = vzalloc(array_size(sizeof(void *),
- adev->gart.num_cpu_pages));
- if (adev->gart.pages == NULL)
- return -ENOMEM;
-#endif
-
return 0;
}
@@ -393,9 +375,5 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- vfree(adev->gart.pages);
- adev->gart.pages = NULL;
-#endif
amdgpu_gart_dummy_page_fini(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index afa2e2877d87..a25fe97b0196 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -46,9 +46,6 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- struct page **pages;
-#endif
bool ready;
/* Asic default pte flags */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b443907afcea..311bcdc59eda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -32,7 +32,6 @@
#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_gem_ttm_helper.h>
#include "amdgpu.h"
@@ -59,6 +58,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
+ struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
int r;
@@ -72,10 +72,13 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bp.preferred_domain = initial_domain;
bp.flags = flags;
bp.domain = initial_domain;
- r = amdgpu_bo_create(adev, &bp, &bo);
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
if (r)
return r;
+ bo = &ubo->bo;
*obj = &bo->tbo.base;
(*obj)->funcs = &amdgpu_gem_object_funcs;
@@ -855,10 +858,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
}
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -896,16 +899,17 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
- {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
+
#endif
-int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
- ARRAY_SIZE(amdgpu_debugfs_gem_list));
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
+ &amdgpu_debugfs_gem_info_fops);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 8e0a6c62322e..95d4f43a03df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -310,9 +310,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_init(adev, ring, 1024,
- irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
@@ -463,20 +462,25 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i;
+ int i, r;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ spin_lock(&adev->gfx.kiq.ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_compute_rings))
+ adev->gfx.num_compute_rings)) {
+ spin_unlock(&adev->gfx.kiq.ring_lock);
return -ENOMEM;
+ }
for (i = 0; i < adev->gfx.num_compute_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
- return amdgpu_ring_test_helper(kiq_ring);
+ return r;
}
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
@@ -519,12 +523,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
-
+ spin_lock(&adev->gfx.kiq.ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
return r;
}
@@ -533,6 +538,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
if (r)
DRM_ERROR("KCQ enable failed\n");
@@ -601,6 +607,7 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
};
+ struct ras_query_if info = { 0 };
if (!adev->gfx.ras_if) {
adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -612,13 +619,19 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
strcpy(adev->gfx.ras_if->name, "gfx");
}
fs_info.head = ih_info.head = *adev->gfx.ras_if;
-
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
&fs_info, &ih_info);
if (r)
goto free;
if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ info.head = *adev->gfx.ras_if;
+ amdgpu_ras_query_error_status(adev, &info);
+ } else {
+ amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ }
+
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
@@ -664,8 +677,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
*/
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_count)
+ adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
amdgpu_ras_reset_gpu(adev);
}
return AMDGPU_RAS_SUCCESS;
@@ -698,7 +712,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
BUG_ON(!ring->funcs->emit_rreg);
@@ -765,7 +779,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
BUG_ON(!ring->funcs->emit_wreg);
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -829,14 +843,10 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
- if (is_support_sw_smu(adev)) {
- smu_gfx_state_change_set(&adev->smu, state);
- } else {
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->gfx_state_change_set)
- ((adev)->powerplay.pp_funcs->gfx_state_change_set(
- (adev)->powerplay.pp_handle, state));
- mutex_unlock(&adev->pm.mutex);
- }
+ mutex_lock(&adev->pm.mutex);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->gfx_state_change_set)
+ ((adev)->powerplay.pp_funcs->gfx_state_change_set(
+ (adev)->powerplay.pp_handle, state));
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 72dbcd2bc6a6..d43fe2ed8116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,7 @@
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
#include "amdgpu_rlc.h"
+#include "soc15.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -204,6 +205,19 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*ras_error_inject)(struct amdgpu_device *adev,
+ void *inject_if);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
+ void (*reset_ras_error_status)(struct amdgpu_device *adev);
+ void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+};
+
struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
@@ -219,11 +233,7 @@ struct amdgpu_gfx_funcs {
uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
u32 queue, u32 vmid);
- int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
- int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
- void (*reset_ras_error_count) (struct amdgpu_device *adev);
void (*init_spm_golden)(struct amdgpu_device *adev);
- void (*query_ras_error_status) (struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
};
@@ -327,7 +337,8 @@ struct amdgpu_gfx {
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/*ras */
- struct ras_common_if *ras_if;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_gfx_ras_funcs *ras_funcs;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
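With the RAS hooks split out of amdgpu_gfx_funcs into the new amdgpu_gfx_ras_funcs table, ASIC files assign a table and common code dispatches through it with NULL checks, as the gfx.c hunk above shows. A sketch of the wiring; gfx_v9_4_ras_funcs and gfx_v9_4_query_ras_error_count are assumed names used for illustration:

static const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
	.ras_late_init         = amdgpu_gfx_ras_late_init,
	.ras_fini              = amdgpu_gfx_ras_fini,
	.query_ras_error_count = gfx_v9_4_query_ras_error_count,  /* assumed per-ASIC helper */
};

/* in the ASIC's early init: */
adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;

/* in common code, check both the table and the hook before calling: */
if (adev->gfx.ras_funcs && adev->gfx.ras_funcs->query_ras_error_count)
	adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);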
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index fe1a39ffda72..4d32233cde92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -32,6 +32,59 @@
#include "amdgpu_xgmi.h"
/**
+ * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for pdb0 and map it for CPU access
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
+{
+ int r;
+ struct amdgpu_bo_param bp;
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
+ uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = PAGE_ALIGN((npdes + 1) * 8);
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
+ if (unlikely(r != 0))
+ goto bo_reserve_failure;
+
+ r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto bo_pin_failure;
+ r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
+ if (r)
+ goto bo_kmap_failure;
+
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+ return 0;
+
+bo_kmap_failure:
+ amdgpu_bo_unpin(adev->gmc.pdb0_bo);
+bo_pin_failure:
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+bo_reserve_failure:
+ amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+ return r;
+}
+
+/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
*
* @bo: the BO to get the PDE for
@@ -158,6 +211,39 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
mc->vram_end, mc->real_vram_size >> 20);
}
+/** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function is only used when GART is used for FB translation. In that
+ * case, we use the sysvm aperture (vmid0 page tables) for both vram
+ * and gart (aka system memory) access.
+ *
+ * GPUVM (and our organization of vmid0 page tables) requires the sysvm
+ * aperture to be placed at a location aligned to 8 times the native
+ * page size. For example, if vm_context0_cntl.page_table_block_size
+ * is 12, then the native page size is 8G (2M*2^12), so sysvm should start
+ * at a 64G-aligned address. For simplicity, we just put sysvm at
+ * address 0, so vram starts at address 0 and gart is right after vram.
+ */
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+{
+ u64 hive_vram_start = 0;
+ u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
+ mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
+ mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
+ mc->gart_start = hive_vram_end + 1;
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+}
+
/**
* amdgpu_gmc_gart_location - try to find GART location
*
@@ -165,7 +251,6 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
* @mc: memory controller structure holding memory information
*
 * Function will try to place GART before or after VRAM.
- *
 * If GART size is bigger than the space left then we adjust GART size.
 * Thus this function never fails.
*/
@@ -176,8 +261,6 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
- mc->gart_size += adev->pm.smu_prv_buffer_size;
-
/* VCE doesn't like it when BOs cross a 4GB segment, so align
* the GART base on a 4GB boundary as well.
*/
@@ -308,26 +391,46 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
int r;
- if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
- r = adev->umc.funcs->ras_late_init(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ras_late_init) {
+ r = adev->umc.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
- if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
- r = adev->mmhub.funcs->ras_late_init(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->ras_late_init) {
+ r = adev->mmhub.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
- return amdgpu_xgmi_ras_late_init(adev);
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
+
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->ras_late_init) {
+ r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
}
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
- amdgpu_umc_ras_fini(adev);
- amdgpu_mmhub_ras_fini(adev);
- amdgpu_xgmi_ras_fini(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ras_fini)
+ adev->umc.ras_funcs->ras_fini(adev);
+
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->ras_fini)
+ amdgpu_mmhub_ras_fini(adev);
+
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->ras_fini)
+ adev->gmc.xgmi.ras_funcs->ras_fini(adev);
}
/*
@@ -384,6 +487,16 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_RAVEN:
+ if (amdgpu_tmz == 0) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled\n");
+ }
+ break;
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -423,6 +536,8 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
/*
* noretry = 0 will cause kfd page fault tests fail
* for some ASICs, so set default to 1 for these ASICs.
@@ -518,3 +633,55 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
adev->mman.stolen_extended_size = 0;
}
}
+
+/**
+ * amdgpu_gmc_init_pdb0 - initialize PDB0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This function is only used when GART page table is used
+ * for FB address translatioin. In such a case, we construct
+ * a 2-level system VM page table: PDB0->PTB, to cover both
+ * VRAM of the hive and system memory.
+ *
+ * PDB0 is static, initialized once on driver initialization.
+ * The first n entries of PDB0 are used as PTE by setting
+ * P bit to 1, pointing to VRAM. The n+1'th entry points
+ * to a big PTB covering system memory.
+ *
+ */
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
+{
+ int i;
+ uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
+ /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
+ */
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
+ u64 vram_addr = adev->vm_manager.vram_base_offset -
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ u64 vram_end = vram_addr + vram_size;
+ u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
+ adev->vm_manager.vram_base_offset - adev->gmc.vram_start;
+
+ flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
+ flags |= AMDGPU_PTE_WRITEABLE;
+ flags |= AMDGPU_PTE_SNOOPED;
+ flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
+ flags |= AMDGPU_PDE_PTE;
+
+ /* The first n PDE0 entries are used as PTE,
+ * pointing to vram
+ */
+ for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
+
+ /* The n+1'th PDE0 entry points to a huge
+ * PTB that has more than 512 entries, each
+ * pointing to a 4K system page
+ */
+ flags = AMDGPU_PTE_VALID;
+ flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
+ /* Requires gart_ptb_gpu_pa to be 4K aligned */
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+}
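The PDB0 sizing used by amdgpu_gmc_pdb0_alloc() and amdgpu_gmc_init_pdb0() above boils down to a little arithmetic: each PDE0 entry, used as a PTE, covers 2M << vmid0_page_table_block_size, and the table needs one entry per such chunk of hive VRAM plus one entry for the system-memory PTB. A small stand-alone sketch with made-up example values (4 nodes x 32 GiB, block_size = 12):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_segment_size = 32ULL << 30;   /* 32 GiB per node (example only) */
	uint64_t num_nodes = 4;
	uint32_t block_size = 12;                   /* vmid0_page_table_block_size */

	uint64_t vram_size = node_segment_size * num_nodes;
	uint32_t pde0_shift = block_size + 21;      /* each PDE0 covers 2M << block_size */
	uint64_t pde0_page_size = 1ULL << pde0_shift;
	uint64_t npdes = (vram_size + pde0_page_size - 1) >> pde0_shift;

	/* npdes VRAM entries plus one system-memory PTB entry, 8 bytes each,
	 * page-aligned by amdgpu_gmc_pdb0_alloc().
	 */
	printf("PDE0 page size: %llu GiB\n", (unsigned long long)(pde0_page_size >> 30));
	printf("VRAM entries: %llu, PDB0 bytes before alignment: %llu\n",
	       (unsigned long long)npdes, (unsigned long long)((npdes + 1) * 8));
	return 0;
}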
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index aa0c83776ce0..cbb7735c6988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -135,6 +135,14 @@ struct amdgpu_gmc_funcs {
unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
+struct amdgpu_xgmi_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
struct amdgpu_xgmi {
/* from psp */
u64 node_id;
@@ -149,6 +157,9 @@ struct amdgpu_xgmi {
struct list_head head;
bool supported;
struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ bool pending_reset;
+ const struct amdgpu_xgmi_ras_funcs *ras_funcs;
};
struct amdgpu_gmc {
@@ -189,10 +200,13 @@ struct amdgpu_gmc {
u64 gart_end;
/* Frame buffer aperture of this GPU device. Different from
* fb_start (see below), this only covers the local GPU device.
- * Driver get fb_start from MC_VM_FB_LOCATION_BASE (set by vbios)
- * and calculate vram_start of this local device by adding an
- * offset inside the XGMI hive.
- * Under VMID0, logical address == MC address
+ * If the driver uses the FB aperture to access FB, it gets fb_start from
+ * MC_VM_FB_LOCATION_BASE (set by vbios) and calculates vram_start
+ * of this local device by adding an offset inside the XGMI hive.
+ * If the driver uses the GART table for VMID0 FB access, it finds a hole in
+ * VMID0's virtual address space to place the SYSVM aperture, inside
+ * which the first part is vram and the second part is gart (covering
+ * system ram).
*/
u64 vram_start;
u64 vram_end;
@@ -204,6 +218,15 @@ struct amdgpu_gmc {
*/
u64 fb_start;
u64 fb_end;
+ /* In the case of using the GART table for vmid0 FB access, [fb_start, fb_end]
+ * will be squeezed into the GART aperture. But we have a PSP FW issue to fix
+ * for now. To temporarily work around it, the two variables below remember
+ * the original fb_start/end so the FB aperture can be re-enabled to work
+ * around the PSP FW issue. They will be deleted once a proper PSP FW fix
+ * is available.
+ */
+ u64 fb_start_original;
+ u64 fb_end_original;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
@@ -240,6 +263,12 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+
+ uint32_t vmid0_page_table_block_size;
+ uint32_t vmid0_page_table_depth;
+ struct amdgpu_bo *pdb0_bo;
+ /* CPU kmapped address of pdb0 */
+ void *ptr_pdb0;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -281,6 +310,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
@@ -288,6 +318,7 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
@@ -309,4 +340,5 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 8980329cded0..540c01052b21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -49,8 +49,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- man->size * PAGE_SIZE);
+ return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
}
/**
@@ -68,8 +67,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_gtt_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
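The two show callbacks above switch from open-coded snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(), which knows the sysfs buffer is exactly one page and warns on misuse. A minimal sketch of the pattern; demo_value is a placeholder attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u64 value = 42;                             /* placeholder payload */

	return sysfs_emit(buf, "%llu\n", value);
}
static DEVICE_ATTR_RO(demo_value);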
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7645223ea0ef..148a3b481b12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <drm/amdgpu_drm.h>
-#include <drm/drm_debugfs.h>
#include "amdgpu.h"
#include "atom.h"
@@ -453,11 +452,9 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
seq_printf(m, "--------------------- DELAYED --------------------- \n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
@@ -471,18 +468,18 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
- {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);
#endif
-int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list,
- ARRAY_SIZE(amdgpu_debugfs_sa_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
+ &amdgpu_debugfs_sa_info_fops);
+
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dc852af4f3b7..faaa6aa2faaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -99,6 +99,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
ih->rptr_cpu = &adev->wb.wb[rptr_offs];
}
+
+ init_waitqueue_head(&ih->wait_process);
return 0;
}
@@ -160,6 +162,52 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
}
}
+/* Waiter helper that checks current rptr matches or passes checkpoint wptr */
+static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ uint32_t checkpoint_wptr,
+ uint32_t *prev_rptr)
+{
+ uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask);
+
+ /* rptr has wrapped. */
+ if (cur_rptr < *prev_rptr)
+ cur_rptr += ih->ptr_mask + 1;
+ *prev_rptr = cur_rptr;
+
+ return cur_rptr >= checkpoint_wptr;
+}
+
+/**
+ * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ *
+ * Used to ensure ring has processed IVs up to the checkpoint write pointer.
+ */
+int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t checkpoint_wptr, rptr;
+
+ if (!ih->enabled || adev->shutdown)
+ return -ENODEV;
+
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ /* Order wptr with rptr. */
+ rmb();
+ rptr = READ_ONCE(ih->rptr);
+
+ /* wptr has wrapped. */
+ if (rptr > checkpoint_wptr)
+ checkpoint_wptr += ih->ptr_mask + 1;
+
+ return wait_event_interruptible(ih->wait_process,
+ amdgpu_ih_has_checkpoint_processed(adev, ih,
+ checkpoint_wptr, &rptr));
+}
+
/**
* amdgpu_ih_process - interrupt handler
*
@@ -180,10 +228,6 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
- /* is somebody else already processing irqs? */
- if (atomic_xchg(&ih->lock, 1))
- return IRQ_NONE;
-
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
@@ -195,7 +239,7 @@ restart_ih:
}
amdgpu_ih_set_rptr(adev, ih);
- atomic_set(&ih->lock, 0);
+ wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
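amdgpu_ih_wait_on_checkpoint_process() above has to cope with the IH ring's read and write pointers wrapping in a power-of-two ring (ptr_mask == ring size - 1): if the checkpoint wptr has already wrapped past the rptr snapshot, it is unfolded by one full lap so a plain >= comparison works. A tiny stand-alone sketch of just that unfold step:

#include <assert.h>
#include <stdint.h>

static uint32_t unfold_checkpoint(uint32_t rptr_snapshot, uint32_t checkpoint_wptr,
				  uint32_t ptr_mask)
{
	/* wptr has wrapped past the rptr snapshot: push it into the next lap */
	if (rptr_snapshot > checkpoint_wptr)
		checkpoint_wptr += ptr_mask + 1;
	return checkpoint_wptr;
}

int main(void)
{
	const uint32_t ptr_mask = 0xFF;             /* 256-entry ring */

	/* no wrap: checkpoint stays where it is */
	assert(unfold_checkpoint(0x10, 0x80, ptr_mask) == 0x80);

	/* wrapped: rptr snapshot 0xF0, writes continued up to 0x10 on the next
	 * lap, so the waiter must see an unfolded rptr reach 0x110 before the
	 * wait completes
	 */
	assert(unfold_checkpoint(0xF0, 0x10, ptr_mask) == 0x110);
	return 0;
}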
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 6ed4a85fc7c3..0649b59830a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -64,8 +64,10 @@ struct amdgpu_ih_ring {
bool enabled;
unsigned rptr;
- atomic_t lock;
struct amdgpu_ih_regs ih_regs;
+
+ /* For waiting on IH processing at checkpoint. */
+ wait_queue_head_t wait_process;
};
/* provided by the ih block */
@@ -87,6 +89,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
+int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index afbbec82a289..90f50561b43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -65,6 +65,41 @@
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
+const char *soc15_ih_clientid_name[] = {
+ "IH",
+ "SDMA2 or ACP",
+ "ATHUB",
+ "BIF",
+ "SDMA3 or DCE",
+ "SDMA4 or ISP",
+ "VMC1 or PCIE0",
+ "RLC",
+ "SDMA0",
+ "SDMA1",
+ "SE0SH",
+ "SE1SH",
+ "SE2SH",
+ "SE3SH",
+ "VCN1 or UVD1",
+ "THM",
+ "VCN or UVD",
+ "SDMA5 or VCE0",
+ "VMC",
+ "SDMA6 or XDMA",
+ "GRBM_CP",
+ "ATS",
+ "ROM_SMUIO",
+ "DF",
+ "SDMA7 or VCE1",
+ "PWR",
+ "reserved",
+ "UTCL2",
+ "EA",
+ "UTCL2LOG",
+ "MP0",
+ "MP1"
+};
+
/**
* amdgpu_hotplug_work_func - work handler for display hotplug event
*
@@ -164,13 +199,13 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
* ack the interrupt if it is there
*/
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
}
return ret;
@@ -347,11 +382,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
kfree(src->enabled_types);
src->enabled_types = NULL;
- if (src->data) {
- kfree(src->data);
- kfree(src);
- adev->irq.client[i].sources[j] = NULL;
- }
}
kfree(adev->irq.client[i].sources);
adev->irq.client[i].sources = NULL;
@@ -535,7 +565,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
- if (!src)
+ if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index ac527e5deae6..cf6116648322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -62,7 +62,6 @@ struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
const struct amdgpu_irq_src_funcs *funcs;
- void *data;
};
struct amdgpu_irq_client {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ff48101bab55..759b34799221 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
}
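The scheduler side of this change is that the .timedout_job callback now returns an enum drm_gpu_sched_stat instead of void, which is why every exit path in amdgpu_job_timedout() reports DRM_GPU_SCHED_STAT_NOMINAL. The sketch below shows the shape of that void-to-status conversion with stand-in names and values; it is a plain userspace illustration, not the DRM scheduler API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the scheduler status and the recovery decisions. */
enum sched_stat { SCHED_STAT_NOMINAL = 1, SCHED_STAT_ENODEV };

static bool soft_recover(void)   { return false; }
static bool should_recover(void) { return true; }

static enum sched_stat job_timedout(void)
{
	if (soft_recover()) {
		printf("soft recovered\n");
		return SCHED_STAT_NOMINAL;	/* the early return must now carry a status */
	}

	if (should_recover())
		printf("full gpu recovery\n");
	else
		printf("suspend the timeout and flag tdr debug\n");

	return SCHED_STAT_NOMINAL;		/* every remaining path returns a status */
}

int main(void)
{
	return job_timedout() == SCHED_STAT_NOMINAL ? 0 : 1;
}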
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a4e2cf7cada1..39ee88d29cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -27,7 +27,6 @@
*/
#include "amdgpu.h"
-#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -160,7 +159,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- if (amdgpu_device_supports_atpx(dev) &&
+ if (amdgpu_device_supports_px(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
adev->runpm = true;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
@@ -201,9 +200,13 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (adev->runpm) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_atpx(dev) &&
- !amdgpu_is_atpx_hybrid())
+ if (amdgpu_device_supports_px(dev))
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* we want direct complete for BOCO */
+ if (amdgpu_device_supports_boco(dev))
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_MAY_SKIP_RESUME);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_allow(dev->dev);
@@ -287,22 +290,30 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
break;
case AMDGPU_INFO_FW_TA:
switch (query_fw->index) {
- case 0:
+ case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
break;
- case 1:
+ case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
break;
- case 2:
+ case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_hdcp_ucode_version;
break;
- case 3:
+ case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_dtm_ucode_version;
break;
+ case TA_FW_TYPE_PSP_RAP:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_rap_ucode_version;
+ break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
+ break;
default:
return -EINVAL;
}
@@ -981,6 +992,63 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
min_t(u64, size, sizeof(ras_mask))) ?
-EFAULT : 0;
}
+ case AMDGPU_INFO_VIDEO_CAPS: {
+ const struct amdgpu_video_codecs *codecs;
+ struct drm_amdgpu_info_video_caps *caps;
+ int r;
+
+ switch (info->video_cap.type) {
+ case AMDGPU_INFO_VIDEO_CAPS_DECODE:
+ r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
+ r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n",
+ info->video_cap.type);
+ return -EINVAL;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ for (i = 0; i < codecs->codec_count; i++) {
+ int idx = codecs->codec_array[i].codec_type;
+
+ switch (idx) {
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
+ caps->codec_info[idx].valid = 1;
+ caps->codec_info[idx].max_width =
+ codecs->codec_array[i].max_width;
+ caps->codec_info[idx].max_height =
+ codecs->codec_array[i].max_height;
+ caps->codec_info[idx].max_pixels_per_frame =
+ codecs->codec_array[i].max_pixels_per_frame;
+ caps->codec_info[idx].max_level =
+ codecs->codec_array[i].max_level;
+ break;
+ default:
+ break;
+ }
+ }
+ r = copy_to_user(out, caps,
+ min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
+ kfree(caps);
+ return r;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1262,16 +1330,25 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
int ret, i;
+ static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
+#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
+ TA_FW_NAME(XGMI),
+ TA_FW_NAME(RAS),
+ TA_FW_NAME(HDCP),
+ TA_FW_NAME(DTM),
+ TA_FW_NAME(RAP),
+ TA_FW_NAME(SECUREDISPLAY),
+#undef TA_FW_NAME
+ };
+
/* VCE */
query_fw.fw_type = AMDGPU_INFO_FW_VCE;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
@@ -1389,31 +1466,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
query_fw.fw_type = AMDGPU_INFO_FW_TA;
- for (i = 0; i < 4; i++) {
+ for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
continue;
- switch (query_fw.index) {
- case 0:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "RAS", fw_info.feature, fw_info.ver);
- break;
- case 1:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "XGMI", fw_info.feature, fw_info.ver);
- break;
- case 2:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "HDCP", fw_info.feature, fw_info.ver);
- break;
- case 3:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "DTM", fw_info.feature, fw_info.ver);
- break;
- default:
- return -EINVAL;
- }
+
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ ta_fw_name[i], fw_info.feature, fw_info.ver);
}
/* SMC */
@@ -1472,17 +1532,18 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_firmware_info_list[] = {
- {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);
+
#endif
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
- ARRAY_SIZE(amdgpu_firmware_info_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_firmware_info", 0444, root,
+ adev, &amdgpu_debugfs_firmware_info_fops);
+
#endif
}
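Two things change in the firmware-info debugfs code above: the file is registered directly with debugfs_create_file() through a fops generated by DEFINE_SHOW_ATTRIBUTE() instead of the removed drm_info_list path, and the per-index TA switch is replaced by a ta_fw_name[] table built with designated initializers plus stringification. The userspace sketch below reproduces that table-building trick; the enum and its values are made up for the example.

#include <stdio.h>

/* Illustrative enum only; the real TA_FW_TYPE_* values live in the PSP headers. */
enum ta_fw_type {
	TA_FW_TYPE_PSP_XGMI = 1,
	TA_FW_TYPE_PSP_RAS,
	TA_FW_TYPE_PSP_HDCP,
	TA_FW_TYPE_MAX_INDEX,
};

/* [TA_FW_TYPE_PSP_##type] places the entry at the enum's index,
 * #type turns the macro argument into the printable string. */
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
	TA_FW_NAME(XGMI),
	TA_FW_NAME(RAS),
	TA_FW_NAME(HDCP),
#undef TA_FW_NAME
};

int main(void)
{
	int i;

	/* iterate like the debugfs loop: walk the enum range, print the names */
	for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++)
		if (ta_fw_name[i])
			printf("TA %s\n", ta_fw_name[i]);
	return 0;
}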
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1ae9bdae7311..11aa29933c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -21,12 +21,16 @@
#ifndef __AMDGPU_MMHUB_H__
#define __AMDGPU_MMHUB_H__
-struct amdgpu_mmhub_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
+ void *ras_error_status);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mmhub_funcs {
u64 (*get_fb_location)(struct amdgpu_device *adev);
void (*init)(struct amdgpu_device *adev);
int (*gart_enable)(struct amdgpu_device *adev);
@@ -40,12 +44,12 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
- void (*query_ras_error_status)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
struct ras_common_if *ras_if;
const struct amdgpu_mmhub_funcs *funcs;
+ const struct amdgpu_mmhub_ras_funcs *ras_funcs;
};
int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 319cb19e1b99..cb0b581bbce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -602,6 +602,14 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+int amdgpu_display_gem_fb_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int amdgpu_display_gem_fb_verify_and_init(
+ struct drm_device *dev, struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 7c11bce4514b..25ee53545837 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -47,6 +47,17 @@ struct nbio_hdp_flush_reg {
u32 ref_and_mask_sdma7;
};
+struct amdgpu_nbio_ras_funcs {
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+};
+
struct amdgpu_nbio_funcs {
const struct nbio_hdp_flush_reg *hdp_flush_reg;
u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
@@ -79,13 +90,6 @@ struct amdgpu_nbio_funcs {
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
- void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
- void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
- int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
- int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- int (*ras_late_init)(struct amdgpu_device *adev);
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
void (*program_aspm)(struct amdgpu_device *adev);
@@ -97,6 +101,7 @@ struct amdgpu_nbio {
struct amdgpu_irq_src ras_err_event_athub_irq;
struct ras_common_if *ras_if;
const struct amdgpu_nbio_funcs *funcs;
+ const struct amdgpu_nbio_ras_funcs *ras_funcs;
};
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 072050429a2f..1345f7eba011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -77,6 +77,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_user *ubo;
if (bo->tbo.pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
@@ -94,7 +95,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
}
amdgpu_bo_unref(&bo->parent);
- kfree(bo->metadata);
+ if (bo->tbo.type == ttm_bo_type_device) {
+ ubo = to_amdgpu_bo_user(bo);
+ kfree(ubo->metadata);
+ }
+
kfree(bo);
}
@@ -248,6 +253,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (!*bo_ptr) {
r = amdgpu_bo_create(adev, &bp, bo_ptr);
@@ -523,7 +529,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
- size_t acc_size;
int r;
/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
@@ -544,12 +549,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;
- *bo_ptr = NULL;
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
-
- bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+ *bo_ptr = NULL;
+ bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -577,8 +580,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
- &bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_bo_destroy);
+ &bo->placement, page_align, &ctx, NULL,
+ bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
@@ -639,6 +642,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_SHADOW;
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
@@ -673,6 +677,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
int r;
bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+
r = amdgpu_bo_do_create(adev, bp, bo_ptr);
if (r)
return r;
@@ -695,6 +700,34 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by a user application.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr)
+{
+ struct amdgpu_bo *bo_ptr;
+ int r;
+
+ bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+ r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+ if (r)
+ return r;
+
+ *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+ return r;
+}
+/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -1062,13 +1095,17 @@ static const char *amdgpu_vram_names[] = {
*/
int amdgpu_bo_init(struct amdgpu_device *adev)
{
- /* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->gmc.aper_base,
- adev->gmc.aper_size);
+ /* On A+A platform, VRAM can be mapped as WB */
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+
+ /* Add an MTRR for the VRAM */
+ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+ }
- /* Add an MTRR for the VRAM */
- adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
- adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->gmc.aper_size >> 20);
@@ -1086,27 +1123,10 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
- arch_phys_wc_del(adev->gmc.vram_mtrr);
- arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
-}
-
-/**
- * amdgpu_bo_fbdev_mmap - mmap fbdev memory
- * @bo: &amdgpu_bo buffer object
- * @vma: vma as input from the fbdev mmap method
- *
- * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma)
-{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
- return ttm_bo_mmap_obj(vma, &bo->tbo);
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
}
/**
@@ -1123,12 +1143,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_bo_user *ubo;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
if (adev->family <= AMDGPU_FAMILY_CZ &&
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
return -EINVAL;
- bo->tiling_flags = tiling_flags;
+ ubo = to_amdgpu_bo_user(bo);
+ ubo->tiling_flags = tiling_flags;
return 0;
}
@@ -1142,10 +1165,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
+ struct amdgpu_bo_user *ubo;
+
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
dma_resv_assert_held(bo->tbo.base.resv);
+ ubo = to_amdgpu_bo_user(bo);
if (tiling_flags)
- *tiling_flags = bo->tiling_flags;
+ *tiling_flags = ubo->tiling_flags;
}
/**
@@ -1164,13 +1191,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
uint32_t metadata_size, uint64_t flags)
{
+ struct amdgpu_bo_user *ubo;
void *buffer;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
if (!metadata_size) {
- if (bo->metadata_size) {
- kfree(bo->metadata);
- bo->metadata = NULL;
- bo->metadata_size = 0;
+ if (ubo->metadata_size) {
+ kfree(ubo->metadata);
+ ubo->metadata = NULL;
+ ubo->metadata_size = 0;
}
return 0;
}
@@ -1182,10 +1212,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (buffer == NULL)
return -ENOMEM;
- kfree(bo->metadata);
- bo->metadata_flags = flags;
- bo->metadata = buffer;
- bo->metadata_size = metadata_size;
+ kfree(ubo->metadata);
+ ubo->metadata_flags = flags;
+ ubo->metadata = buffer;
+ ubo->metadata_size = metadata_size;
return 0;
}
@@ -1209,21 +1239,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
{
+ struct amdgpu_bo_user *ubo;
+
if (!buffer && !metadata_size)
return -EINVAL;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
if (buffer) {
- if (buffer_size < bo->metadata_size)
+ if (buffer_size < ubo->metadata_size)
return -EINVAL;
- if (bo->metadata_size)
- memcpy(buffer, bo->metadata, bo->metadata_size);
+ if (ubo->metadata_size)
+ memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
if (metadata_size)
- *metadata_size = bo->metadata_size;
+ *metadata_size = ubo->metadata_size;
if (flags)
- *flags = bo->metadata_flags;
+ *flags = ubo->metadata_flags;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9ac37569823f..2d1fefbe1e99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -37,9 +37,12 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+
struct amdgpu_bo_param {
unsigned long size;
int byte_align;
+ u32 bo_ptr_size;
u32 domain;
u32 preferred_domain;
u64 flags;
@@ -89,10 +92,6 @@ struct amdgpu_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
unsigned prime_shared_count;
/* per VM structure for page tables and with virtual addresses */
struct amdgpu_vm_bo_base *vm_bo;
@@ -100,7 +99,6 @@ struct amdgpu_bo {
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
- struct amdgpu_mn *mn;
#ifdef CONFIG_MMU_NOTIFIER
@@ -112,6 +110,15 @@ struct amdgpu_bo {
struct kgd_mem *kfd_bo;
};
+struct amdgpu_bo_user {
+ struct amdgpu_bo bo;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -255,6 +262,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size, uint32_t domain,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -269,8 +279,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
@@ -329,7 +337,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
-int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
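The object changes above split user-only state (tiling flags, metadata) out of struct amdgpu_bo into struct amdgpu_bo_user, which embeds the base BO as its first member; bp->bo_ptr_size tells the shared create path how many bytes to allocate, and to_amdgpu_bo_user() recovers the containing structure via container_of(). A minimal userspace sketch of that embedding pattern, with simplified stand-in types, follows.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bo {			/* base object, always allocated */
	size_t size;
};

struct bo_user {		/* user BO embeds the base as its first member */
	struct bo bo;
	unsigned long tiling_flags;	/* user-only state lives here */
};

#define to_bo_user(b) container_of((b), struct bo_user, bo)

/* shared create path: the caller says how large the containing object is */
static struct bo *bo_create(size_t size, size_t bo_ptr_size)
{
	struct bo *b;

	if (bo_ptr_size < sizeof(struct bo))
		return NULL;
	b = calloc(1, bo_ptr_size);	/* one allocation covers base + extras */
	if (b)
		b->size = size;
	return b;
}

int main(void)
{
	struct bo *base = bo_create(4096, sizeof(struct bo_user));
	struct bo_user *ubo;

	if (!base)
		return 1;
	ubo = to_bo_user(base);	/* recover the containing struct */
	ubo->tiling_flags = 0x123;
	printf("size=%zu tiling=%#lx\n", ubo->bo.size, ubo->tiling_flags);
	free(base);		/* base is the start of the whole allocation */
	return 0;
}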
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 839917eb7bc3..9e769cf6095b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -34,6 +34,7 @@
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"
+#include "psp_v13_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -56,7 +57,7 @@ static int psp_load_smu_fw(struct psp_context *psp);
* - Load XGMI/RAS/HDCP/DTM TA if any
*
* This new sequence is required for
- * - Arcturus
+ * - Arcturus and onwards
* - Navi12 and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
@@ -71,7 +72,7 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp
if (adev->flags & AMD_IS_APU)
return;
- if ((adev->asic_type == CHIP_ARCTURUS) ||
+ if ((adev->asic_type >= CHIP_ARCTURUS) ||
(adev->asic_type >= CHIP_NAVI12))
psp->pmfw_centralized_cstate_management = true;
}
@@ -109,6 +110,9 @@ static int psp_early_init(void *handle)
case CHIP_RENOIR:
psp_v12_0_set_psp_funcs(psp);
break;
+ case CHIP_ALDEBARAN:
+ psp_v13_0_set_psp_funcs(psp);
+ break;
default:
return -EINVAL;
}
@@ -383,7 +387,7 @@ static int psp_tmr_init(struct psp_context *psp)
* Note: this memory need be reserved till the driver
* uninitializes.
*/
- tmr_size = PSP_TMR_SIZE;
+ tmr_size = PSP_TMR_SIZE(psp->adev);
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
@@ -399,10 +403,20 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ /* workaround for the tmr_mc_addr:
+ * PSP requires an address in the FB aperture. Right now the driver
+ * produces tmr_mc_addr in the GART aperture. Convert it back to the FB
+ * aperture for PSP. Will revert this once we get a fix from the PSP FW.
+ */
+ if (psp->adev->asic_type == CHIP_ALDEBARAN) {
+ psp->tmr_mc_addr -= psp->adev->gmc.fb_start;
+ psp->tmr_mc_addr += psp->adev->gmc.fb_start_original;
+ }
+
return ret;
}
@@ -542,6 +556,46 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_boot_config_set(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+ if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ return 0;
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+ cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
+ cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
+ cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+
+ return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
+static int psp_rl_load(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+ if (psp->rl_bin_size == 0)
+ return 0;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->rl_start_addr, psp->rl_bin_size);
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl_bin_size;
+ cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
+
+ return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t asd_mc, uint32_t size)
{
@@ -755,8 +809,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd;
struct amdgpu_device *adev = psp->adev;
- /* XGMI TA unload currently is not supported on Arcturus */
- if (adev->asic_type == CHIP_ARCTURUS)
+ /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
return 0;
/*
@@ -1561,6 +1616,7 @@ static int psp_rap_unload(struct psp_context *psp)
static int psp_rap_initialize(struct psp_context *psp)
{
int ret;
+ enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
/*
* TODO: bypass the initialize in sriov for now
@@ -1584,8 +1640,8 @@ static int psp_rap_initialize(struct psp_context *psp)
if (ret)
return ret;
- ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
- if (ret != TA_RAP_STATUS__SUCCESS) {
+ ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
+ if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp);
amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
@@ -1594,8 +1650,10 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.rap_initialized = false;
- dev_warn(psp->adev->dev, "RAP TA initialize fail.\n");
- return -EINVAL;
+ dev_warn(psp->adev->dev, "RAP TA initialize failed (%d), status %d.\n",
+ ret, status);
+
+ return ret;
}
return 0;
@@ -1620,13 +1678,13 @@ static int psp_rap_terminate(struct psp_context *psp)
return ret;
}
-int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
{
struct ta_rap_shared_memory *rap_cmd;
- int ret;
+ int ret = 0;
if (!psp->rap_context.rap_initialized)
- return -EINVAL;
+ return 0;
if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
@@ -1642,14 +1700,16 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
rap_cmd->validation_method_id = METHOD_A;
ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
- if (ret) {
- mutex_unlock(&psp->rap_context.mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
+
+ if (status)
+ *status = rap_cmd->rap_status;
+out_unlock:
mutex_unlock(&psp->rap_context.mutex);
- return rap_cmd->rap_status;
+ return ret;
}
// RAP end
@@ -1870,6 +1930,11 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ ret = psp_boot_config_set(adev);
+ if (ret) {
+ DRM_WARN("PSP set boot config failed!\n");
+ }
+
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
@@ -2104,9 +2169,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
return 0;
-
- if (amdgpu_in_reset(adev) && ras && ras->supported &&
- adev->asic_type == CHIP_ARCTURUS) {
+ if ((amdgpu_in_reset(adev) &&
+ ras && ras->supported &&
+ (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_VEGA20)) ||
+ (adev->in_runpm &&
+ adev->asic_type >= CHIP_NAVI10 &&
+ adev->asic_type <= CHIP_NAVI12)) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2159,6 +2228,22 @@ static bool fw_load_skip_check(struct psp_context *psp,
return false;
}
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count)
+{
+ int ret = 0, i;
+ struct amdgpu_firmware_info *ucode;
+
+ for (i = 0; i < ucode_count; ++i) {
+ ucode = ucode_list[i];
+ psp_print_fw_hdr(psp, ucode);
+ ret = psp_execute_np_fw_load(psp, ucode);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
@@ -2276,6 +2361,12 @@ skip_memalloc:
return ret;
}
+ ret = psp_rl_load(adev);
+ if (ret) {
+ DRM_ERROR("PSP load RL failed!\n");
+ return ret;
+ }
+
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -2751,6 +2842,9 @@ int psp_init_sos_microcode(struct psp_context *psp,
adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
+ adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes);
+ adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes);
}
break;
default:
@@ -2916,7 +3010,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
return ret;
}
- return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+ return sysfs_emit(buf, "%x\n", fw_ver);
}
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
@@ -3052,3 +3146,11 @@ const struct amdgpu_ip_block_version psp_v12_0_ip_block =
.rev = 0,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 13,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
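Two Aldebaran-specific details above are worth spelling out: PSP_TMR_SIZE becomes a per-ASIC macro (0x800000 on Aldebaran, 0x400000 elsewhere, see the header change below), and tmr_mc_addr is temporarily rebased because the PSP firmware expects an address in the original FB aperture rather than the one the driver currently produces. The rebase keeps the offset and swaps the base, subtracting gmc.fb_start and adding gmc.fb_start_original; the userspace sketch below walks through that arithmetic with made-up base values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up apertures, only the arithmetic matters */
	uint64_t fb_start          = 0x0000008000000000ULL;	/* base the driver used */
	uint64_t fb_start_original = 0x0000000000000000ULL;	/* base PSP expects */
	uint64_t tmr_mc_addr       = fb_start + 0x400000;	/* TMR inside the window */

	/* same two steps as psp_tmr_init(): keep the offset, swap the base */
	tmr_mc_addr -= fb_start;
	tmr_mc_addr += fb_start_original;

	printf("rebased TMR address: 0x%llx\n",
	       (unsigned long long)tmr_mc_addr);
	return 0;
}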
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cb50ba445f8c..46a5328e00e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,7 +37,7 @@
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
-#define PSP_TMR_SIZE 0x400000
+#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
@@ -248,11 +248,13 @@ struct psp_context
uint32_t toc_bin_size;
uint32_t kdb_bin_size;
uint32_t spl_bin_size;
+ uint32_t rl_bin_size;
uint8_t *sys_start_addr;
uint8_t *sos_start_addr;
uint8_t *toc_start_addr;
uint8_t *kdb_start_addr;
uint8_t *spl_start_addr;
+ uint8_t *rl_start_addr;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -365,11 +367,13 @@ struct amdgpu_psp_funcs {
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
-
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
+
+extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, bool check_changed);
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
@@ -395,12 +399,11 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status);
int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
-extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
uint32_t value);
int psp_ring_cmd_submit(struct psp_context *psp,
@@ -417,4 +420,7 @@ int psp_init_ta_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
+
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count);
#endif
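psp_rap_invoke() now separates the two failure modes: the int return value says whether the command reached the TA at all, while the new enum ta_rap_status out-parameter carries the TA's verdict, and callers such as psp_rap_initialize() and the RAP debugfs hook check both. The sketch below shows that calling convention in isolation; every name in it is a stand-in.

#include <stdio.h>

enum ta_status { TA_STATUS_SUCCESS = 0, TA_STATUS_REJECTED };

/* return value: could the command be submitted; *status: what the TA said */
static int ta_invoke(int cmd, enum ta_status *status)
{
	if (cmd < 0)
		return -22;	/* stand-in for -EINVAL: never reached the TA */

	if (status)		/* only meaningful when submission succeeded */
		*status = (cmd == 2) ? TA_STATUS_SUCCESS : TA_STATUS_REJECTED;
	return 0;
}

int main(void)
{
	enum ta_status status = TA_STATUS_SUCCESS;	/* initialized like the driver does */
	int ret = ta_invoke(2, &status);

	/* caller pattern from the RAP debugfs path: check both values */
	if (!ret && status == TA_STATUS_SUCCESS)
		printf("validate test success\n");
	else
		printf("ret=%d status=%d\n", ret, (int)status);
	return 0;
}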
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 8da5356c36f1..51909bf8798c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -48,6 +48,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
struct ta_rap_cmd_output_data *rap_cmd_output;
struct drm_device *dev = adev_to_drm(adev);
uint32_t op;
+ enum ta_rap_status status;
int ret;
if (*pos || size != 2)
@@ -70,9 +71,8 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
switch (op) {
case 2:
- ret = psp_rap_invoke(&adev->psp, op);
-
- if (ret == TA_RAP_STATUS__SUCCESS) {
+ ret = psp_rap_invoke(&adev->psp, op, &status);
+ if (!ret && status == TA_RAP_STATUS__SUCCESS) {
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
@@ -97,6 +97,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
default:
dev_info(adev->dev, "Unsupported op id: %d, ", op);
dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
+ break;
}
amdgpu_gfx_off_ctrl(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1fb2a91ad30a..0541196ae1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -99,6 +99,49 @@ static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
return false;
}
+static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
+{
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct eeprom_table_record err_rec;
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ dev_warn(adev->dev,
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ras_check_bad_page(adev, address)) {
+ dev_warn(adev->dev,
+ "RAS WARN: 0x%llx has been marked as bad page!\n",
+ address);
+ return 0;
+ }
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+ err_rec.address = address;
+ err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec.ts = (uint64_t)ktime_get_real_seconds();
+ err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ err_data.err_addr = &err_rec;
+ err_data.err_addr_cnt = 1;
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+ }
+
+ dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
+ dev_warn(adev->dev, "Clear EEPROM:\n");
+ dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -109,7 +152,7 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
ssize_t s;
char val[128];
- if (amdgpu_ras_error_query(obj->adev, &info))
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
@@ -178,11 +221,25 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 1;
else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
op = 2;
+ else if (strstr(str, "retire_page"))
+ op = 3;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
if (op != -1) {
+
+ if (op == 3) {
+ if (sscanf(str, "%*s %llu", &address) != 1)
+ if (sscanf(str, "%*s 0x%llx", &address) != 1)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+
+ return 0;
+ }
+
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
return -EINVAL;
@@ -310,6 +367,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
if (ret)
return -EINVAL;
+ if (data.op == 3) {
+ ret = amdgpu_reserve_page_direct(adev, data.inject.address);
+
+ if (!ret)
+ return size;
+ else
+ return ret;
+ }
+
if (!amdgpu_ras_is_supported(adev, data.head.block))
return -EINVAL;
@@ -431,15 +498,13 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
};
if (!amdgpu_ras_get_error_query_ready(obj->adev))
- return snprintf(buf, PAGE_SIZE,
- "Query currently inaccessible\n");
+ return sysfs_emit(buf, "Query currently inaccessible\n");
- if (amdgpu_ras_error_query(obj->adev, &info))
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
- "ue", info.ue_count,
- "ce", info.ce_count);
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
}
/* obj begin */
@@ -449,11 +514,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
static inline void put_obj(struct ras_manager *obj)
{
- if (obj && --obj->use == 0)
+ if (obj && (--obj->use == 0))
list_del(&obj->node);
- if (obj && obj->use < 0) {
- DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
- }
+ if (obj && (obj->use < 0))
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
}
/* make one obj and return it. */
@@ -463,7 +527,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return NULL;
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
@@ -490,7 +554,7 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
struct ras_manager *obj;
int i;
- if (!con)
+ if (!adev->ras_features || !con)
return NULL;
if (head) {
@@ -590,7 +654,11 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
con->features |= BIT(head->block);
} else {
if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
- con->features &= ~BIT(head->block);
+ /* skip cleaning the gfx ras context feature for VEGA20 Gaming;
+ * will clean it up later
+ */
+ if (!(!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)))
+ con->features &= ~BIT(head->block);
put_obj(obj);
}
}
@@ -693,6 +761,10 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret)
return ret;
+ /* the gfx block ras disable cmd must be sent to the ras TA */
+ if (head->block == AMDGPU_RAS_BLOCK__GFX)
+ con->features |= BIT(head->block);
+
ret = amdgpu_ras_feature_enable(adev, head, 0);
}
} else
@@ -757,8 +829,8 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* feature ctl end */
/* query/inject/cure begin */
-int amdgpu_ras_error_query(struct amdgpu_device *adev,
- struct ras_query_if *info)
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -769,13 +841,15 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__UMC:
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, &err_data);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address)
+ adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__SDMA:
if (adev->sdma.funcs->query_ras_error_count) {
@@ -785,19 +859,32 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
}
break;
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_count)
+ adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
+
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_status)
+ adev->gfx.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.funcs->query_ras_error_count)
- adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_count)
+ adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
+
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_status)
+ adev->mmhub.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__PCIE_BIF:
- if (adev->nbio.funcs->query_ras_error_count)
- adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->query_ras_error_count)
+ adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
- amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->query_ras_error_count)
+ adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -826,6 +913,38 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ if (!amdgpu_ras_is_supported(adev, block))
+ return -EINVAL;
+
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->reset_ras_error_count)
+ adev->gfx.ras_funcs->reset_ras_error_count(adev);
+
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->reset_ras_error_status)
+ adev->gfx.ras_funcs->reset_ras_error_status(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ if (adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
struct ta_ras_trigger_error_input *block_info)
@@ -878,12 +997,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->ras_error_inject)
- ret = adev->gfx.funcs->ras_error_inject(adev, info);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_error_inject)
+ ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
else
ret = -EINVAL;
break;
case AMDGPU_RAS_BLOCK__UMC:
+ case AMDGPU_RAS_BLOCK__SDMA:
case AMDGPU_RAS_BLOCK__MMHUB:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
@@ -913,7 +1034,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_err_data data = {0, 0};
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
list_for_each_entry(obj, &con->head, node) {
@@ -921,7 +1042,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
.head = obj->head,
};
- if (amdgpu_ras_error_query(adev, &info))
+ if (amdgpu_ras_query_error_status(adev, &info))
return 0;
data.ce_count += info.ce_count;
@@ -1137,16 +1258,17 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
*
*/
/* debugfs begin */
-static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct dentry *dir;
struct drm_minor *minor = adev_to_drm(adev)->primary;
- con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
- debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
- debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_eeprom_ops);
+ dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_eeprom_ops);
/*
* After one uncorrectable error happens, usually GPU recovery will
@@ -1156,24 +1278,24 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
* ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
* will never be called.
*/
- debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
- &con->reboot);
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
/*
* User could set this not to clean up hardware's error count register
* of RAS IPs during ras recovery.
*/
- debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
- con->dir, &con->disable_ras_err_cnt_harvest);
+ debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
+ &con->disable_ras_err_cnt_harvest);
+ return dir;
}
static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head)
+ struct ras_fs_if *head,
+ struct dentry *dir)
{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
- if (!obj || obj->ent)
+ if (!obj || !dir)
return;
get_obj(obj);
@@ -1182,14 +1304,14 @@ static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
head->debugfs_name,
sizeof(obj->fs_data.debugfs_name));
- obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
- S_IWUGO | S_IRUGO, con->dir, obj,
- &amdgpu_ras_debugfs_ops);
+ debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
+ obj, &amdgpu_ras_debugfs_ops);
}
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct dentry *dir;
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1200,7 +1322,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
- amdgpu_ras_debugfs_create_ctrl_node(adev);
+ dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
list_for_each_entry(obj, &con->head, node) {
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
@@ -1208,34 +1330,11 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
sprintf(fs_info.debugfs_name, "%s_err_inject",
ras_block_str(obj->head.block));
fs_info.head = obj->head;
- amdgpu_ras_debugfs_create(adev, &fs_info);
+ amdgpu_ras_debugfs_create(adev, &fs_info, dir);
}
}
}
-static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
- struct ras_common_if *head)
-{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
-
- if (!obj || !obj->ent)
- return;
-
- obj->ent = NULL;
- put_obj(obj);
-}
-
-static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_manager *obj, *tmp;
-
- list_for_each_entry_safe(obj, tmp, &con->head, node) {
- amdgpu_ras_debugfs_remove(adev, &obj->head);
- }
-
- con->dir = NULL;
-}
/* debugfs end */
/* ras fs */
@@ -1282,8 +1381,17 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
- if (IS_ENABLED(CONFIG_DEBUG_FS))
- amdgpu_ras_debugfs_remove_all(adev);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *con_obj, *ip_obj, *tmp;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
+ ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
+ if (ip_obj)
+ put_obj(ip_obj);
+ }
+ }
+
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
@@ -1447,7 +1555,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return;
list_for_each_entry(obj, &con->head, node) {
@@ -1464,7 +1572,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
continue;
- amdgpu_ras_error_query(adev, &info);
+ amdgpu_ras_query_error_status(adev, &info);
}
}
@@ -1478,12 +1586,14 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
*/
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->query_ras_error_status)
- adev->gfx.funcs->query_ras_error_status(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_status)
+ adev->gfx.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.funcs->query_ras_error_status)
- adev->mmhub.funcs->query_ras_error_status(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_status)
+ adev->mmhub.ras_funcs->query_ras_error_status(adev);
break;
default:
break;
@@ -1495,7 +1605,7 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return;
list_for_each_entry(obj, &con->head, node) {
@@ -1809,7 +1919,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
bool exc_err_limit = false;
int ret;
- if (con)
+ if (adev->ras_features && con)
data = &con->eh_data;
else
return 0;
@@ -1828,6 +1938,12 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+ /* Todo: during testing the SMU might fail to read the eeprom through I2C
+ * when the GPU is pending an XGMI reset at probe time
+ * (mostly after the second bus reset); skip it for now
+ */
+ if (adev->gmc.xgmi.pending_reset)
+ return 0;
ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
/*
* This calling fails when exc_err_limit is true or
@@ -1897,15 +2013,13 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
- if (adev->asic_type != CHIP_VEGA10 &&
- adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_SIENNA_CICHLID)
- return 1;
- else
- return 0;
+ return adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN ||
+ adev->asic_type == CHIP_SIENNA_CICHLID;
}
/*
@@ -1924,22 +2038,32 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
- amdgpu_ras_check_asic_type(adev))
+ !amdgpu_ras_asic_supported(adev))
return;
- if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- dev_info(adev->dev, "HBM ECC is active.\n");
- *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else
- dev_info(adev->dev, "HBM ECC is not presented.\n");
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "MEM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "MEM ECC is not present.\n");
+ }
- if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- dev_info(adev->dev, "SRAM ECC is active.\n");
- *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else
- dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "SRAM ECC is not present.\n");
+ }
+ } else {
+ /* the driver only manages the RAS feature of a few IP blocks
+ * when the GPU is connected to the CPU through XGMI */
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__MMHUB);
+ }
/* hw_supported needs to be aligned with RAS block mask. */
*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1970,6 +2094,15 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
+ /* set the gfx block ras context feature for VEGA20 Gaming;
+ * send the ras disable cmd to the ras TA during ras late init.
+ */
+ if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) {
+ con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
+
+ return 0;
+ }
+
r = 0;
goto release_con;
}
@@ -1979,14 +2112,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (adev->nbio.funcs->init_ras_controller_interrupt) {
- r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ /* initialize the nbio ras functions ahead of any other
+ * ras functions so that the hardware fatal error interrupt
+ * can be enabled as early as possible */
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+ break;
+ default:
+ /* nbio ras is not available */
+ break;
+ }
+
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
if (r)
goto release_con;
}
- if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
- r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
if (r)
goto release_con;
}
@@ -2007,6 +2157,32 @@ release_con:
return r;
}
+static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return 1;
+ return 0;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct ras_query_if info = {
+ .head = *ras_block,
+ };
+
+ if (!amdgpu_persistent_edc_harvesting_supported(adev))
+ return 0;
+
+ if (amdgpu_ras_query_error_status(adev, &info) != 0)
+ DRM_WARN("RAS init harvest failure");
+
+ if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+ DRM_WARN("RAS init harvest reset failure");
+
+ return 0;
+}
+
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
@@ -2036,6 +2212,9 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
return r;
}
+ /* check for errors on warm reset for ASICs with persistent EDC support */
+ amdgpu_persistent_edc_harvesting(adev, ras_block);
+
/* in resume phase, no need to create ras fs node */
if (adev->in_suspend || amdgpu_in_reset(adev))
return 0;
@@ -2083,8 +2262,12 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
- if (!con)
+ if (!adev->ras_features || !con) {
+ /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
+ amdgpu_release_ras_context(adev);
+
return;
+ }
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
/* Set up all other IPs which are not implemented. There is a
@@ -2125,7 +2308,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return;
amdgpu_ras_disable_all_features(adev, 0);
@@ -2139,7 +2322,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
/* Need disable ras on all IPs here before ip [hw/sw]fini */
@@ -2152,7 +2335,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
amdgpu_ras_fs_fini(adev);
@@ -2196,18 +2379,16 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
return false;
}
-bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
+void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool exc_err_limit = false;
- if (con && (amdgpu_bad_page_threshold != 0))
- amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
- &exc_err_limit);
+ if (!con)
+ return;
- /*
- * We are only interested in variable exc_err_limit,
- * as it says if GPU is in bad state or not.
- */
- return exc_err_limit;
+ if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+ con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+ }
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 762f5e46c007..60df268a0c66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -318,8 +318,6 @@ struct amdgpu_ras {
uint32_t supported;
uint32_t features;
struct list_head head;
- /* debugfs */
- struct dentry *dir;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -395,8 +393,6 @@ struct ras_manager {
struct list_head node;
/* the device */
struct amdgpu_device *adev;
- /* debugfs */
- struct dentry *ent;
/* sysfs */
struct device_attribute sysfs_attr;
int attr_inuse;
@@ -495,8 +491,6 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
-bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev);
-
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
@@ -594,9 +588,12 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-int amdgpu_ras_error_query(struct amdgpu_device *adev,
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info);
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info);
@@ -629,4 +626,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev);
+
+void amdgpu_release_ras_context(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 19d9aa76cfbf..f40c871da0c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -31,6 +31,7 @@
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ALDEBARAN 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -64,7 +65,8 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
(adev->asic_type == CHIP_ARCTURUS) ||
- (adev->asic_type == CHIP_SIENNA_CICHLID))
+ (adev->asic_type == CHIP_SIENNA_CICHLID) ||
+ (adev->asic_type == CHIP_ALDEBARAN))
return true;
return false;
@@ -106,6 +108,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
*i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
break;
+ case CHIP_ALDEBARAN:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ALDEBARAN;
+ break;
+
default:
return false;
}
@@ -434,47 +440,28 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-int amdgpu_ras_eeprom_check_err_threshold(
- struct amdgpu_ras_eeprom_control *control,
- bool *exceed_err_limit)
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- unsigned char buff[EEPROM_ADDRESS_SIZE +
- EEPROM_TABLE_HEADER_SIZE] = { 0 };
- struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
- struct i2c_msg msg = {
- .addr = control->i2c_address,
- .flags = I2C_M_RD,
- .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
- .buf = buff,
- };
- int ret;
-
- *exceed_err_limit = false;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
if (!__is_ras_eeprom_supported(adev))
- return 0;
-
- /* read EEPROM table header */
- mutex_lock(&control->tbl_mutex);
- ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
- if (ret < 1) {
- dev_err(adev->dev, "Failed to read EEPROM table header.\n");
- goto err;
- }
+ return false;
- __decode_table_header_from_buff(hdr, &buff[2]);
+ /* skip checking eeprom table for VEGA20 Gaming */
+ if (!con)
+ return false;
+ else
+ if (!(con->features & BIT(AMDGPU_RAS_BLOCK__UMC)))
+ return false;
- if (hdr->header == EEPROM_TABLE_HDR_BAD) {
+ if (con->eeprom_control.tbl_hdr.header == EEPROM_TABLE_HDR_BAD) {
dev_warn(adev->dev, "This GPU is in BAD status.");
dev_warn(adev->dev, "Please retire it or setting one bigger "
"threshold value when reloading driver.\n");
- *exceed_err_limit = true;
+ return true;
}
-err:
- mutex_unlock(&control->tbl_mutex);
- return 0;
+ return false;
}
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index c7a5e5c7c61e..178721170974 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -80,9 +80,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
-int amdgpu_ras_eeprom_check_err_threshold(
- struct amdgpu_ras_eeprom_control *control,
- bool *exceed_err_limit);
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
new file mode 100644
index 000000000000..40f2adf305bc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef __AMDGPU_RES_CURSOR_H__
+#define __AMDGPU_RES_CURSOR_H__
+
+#include <drm/drm_mm.h>
+#include <drm/ttm/ttm_resource.h>
+
+/* cursor state for walking over vram_mgr and gtt_mgr allocations */
+struct amdgpu_res_cursor {
+ uint64_t start;
+ uint64_t size;
+ uint64_t remaining;
+ struct drm_mm_node *node;
+};
+
+/**
+ * amdgpu_res_first - initialize an amdgpu_res_cursor
+ *
+ * @res: TTM resource object to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations starting at @start with length @size.
+ */
+static inline void amdgpu_res_first(struct ttm_resource *res,
+ uint64_t start, uint64_t size,
+ struct amdgpu_res_cursor *cur)
+{
+ struct drm_mm_node *node;
+
+ if (!res || !res->mm_node) {
+ cur->start = start;
+ cur->size = size;
+ cur->remaining = size;
+ cur->node = NULL;
+ return;
+ }
+
+ BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+
+ node = res->mm_node;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+}
+
+/**
+ * amdgpu_res_next - advance the cursor
+ *
+ * @cur: the cursor to advance
+ * @size: number of bytes to move forward
+ *
+ * Move the cursor @size bytes forward, walking to the next node if necessary.
+ */
+static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
+{
+ struct drm_mm_node *node = cur->node;
+
+ BUG_ON(size > cur->remaining);
+
+ cur->remaining -= size;
+ if (!cur->remaining)
+ return;
+
+ cur->size -= size;
+ if (cur->size) {
+ cur->start += size;
+ return;
+ }
+
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+}
+
+#endif
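The cursor is meant to replace open-coded drm_mm_node walks: a caller initializes it over a ttm_resource and then consumes contiguous chunks until nothing remains. A minimal sketch of that pattern, assuming a valid struct ttm_resource *res and its size in bytes (the function and variable names here are illustrative, not part of the patch):

static void example_walk_resource(struct ttm_resource *res, uint64_t num_bytes)
{
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, 0, num_bytes, &cursor);
	while (cursor.remaining) {
		/* cursor.start/cursor.size describe one contiguous chunk */
		pr_debug("chunk at 0x%llx, size 0x%llx\n",
			 cursor.start, cursor.size);
		amdgpu_res_next(&cursor, cursor.size);
	}
}

This is the same loop shape the amdgpu_ttm.c changes later in this diff adopt for the copy, fill and eviction-check paths.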
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644
index 000000000000..02afd4115675
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_handler *handler)
+{
+ /* TODO: Check if handler exists? */
+ list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
+ return 0;
+}
+
+int amdgpu_reset_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ ret = aldebaran_reset_init(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_fini(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ ret = aldebaran_reset_fini(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -ENOSYS;
+
+ return reset_handler->prepare_hwcontext(adev->reset_cntl,
+ reset_context);
+}
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ int ret;
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -ENOSYS;
+
+ ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
+ if (ret)
+ return ret;
+
+ return reset_handler->restore_hwcontext(adev->reset_cntl,
+ reset_context);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
new file mode 100644
index 000000000000..e00d38d9160a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RESET_H__
+#define __AMDGPU_RESET_H__
+
+#include "amdgpu.h"
+
+enum AMDGPU_RESET_FLAGS {
+
+ AMDGPU_NEED_FULL_RESET = 0,
+ AMDGPU_SKIP_HW_RESET = 1,
+};
+
+struct amdgpu_reset_context {
+ enum amd_reset_method method;
+ struct amdgpu_device *reset_req_dev;
+ struct amdgpu_job *job;
+ struct amdgpu_hive_info *hive;
+ unsigned long flags;
+};
+
+struct amdgpu_reset_handler {
+ enum amd_reset_method reset_method;
+ struct list_head handler_list;
+ int (*prepare_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*perform_reset)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+
+ int (*do_reset)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_reset_control {
+ void *handle;
+ struct work_struct reset_work;
+ struct mutex reset_lock;
+ struct list_head reset_handlers;
+ atomic_t in_reset;
+ enum amd_reset_method active_reset;
+ struct amdgpu_reset_handler *(*get_reset_handler)(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ void (*async_reset)(struct work_struct *work);
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev);
+int amdgpu_reset_fini(struct amdgpu_device *adev);
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_handler *handler);
+
+#endif
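A rough sketch of how an ASIC backend is expected to hook into this framework: it fills in an amdgpu_reset_handler with its callbacks and registers it on the controller, so that amdgpu_reset_prepare_hwcontext()/amdgpu_reset_perform_reset() can dispatch to it. The names below are hypothetical; only the structures and amdgpu_reset_add_handler() come from the patch.

static int example_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
				     struct amdgpu_reset_context *context)
{
	/* save whatever hardware state the reset method needs */
	return 0;
}

static struct amdgpu_reset_handler example_mode2_handler = {
	.reset_method = AMD_RESET_METHOD_MODE2,
	.prepare_hwcontext = example_prepare_hwcontext,
	/* .perform_reset and .restore_hwcontext would be filled in the
	 * same way; amdgpu_reset_perform_reset() invokes both.
	 */
};

/* in the backend's reset init path, e.g. something like aldebaran_reset_init() */
amdgpu_reset_add_handler(adev->reset_cntl, &example_mode2_handler);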
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b644c78475fd..688624ebe421 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int hw_prio)
+ unsigned int irq_type, unsigned int hw_prio,
+ atomic_t *sched_score)
{
int r;
int sched_hw_submission = amdgpu_sched_hw_submission;
@@ -189,7 +190,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
+ r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
+ sched_score);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 56acec1075ac..ca1622835296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -111,7 +111,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
+ unsigned num_hw_submission,
+ atomic_t *sched_score);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -282,7 +283,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int ring_size, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int prio);
+ unsigned int irq_type, unsigned int prio,
+ atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index aeaaae713c59..4fc2ce8ce8ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -127,7 +127,8 @@ struct amdgpu_rlc_funcs {
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
- void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
+ u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index e5b8fb8e75c5..f8fb755e3aa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -64,6 +64,11 @@ struct amdgpu_sdma {
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
+ struct amdgpu_irq_src vm_hole_irq;
+ struct amdgpu_irq_src doorbell_invalid_irq;
+ struct amdgpu_irq_src pool_timeout_irq;
+ struct amdgpu_irq_src srbm_write_irq;
+
int num_instances;
uint32_t srbm_soft_reset;
bool has_page_queue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 834440ab9ff7..5369c8dd0764 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -69,6 +69,9 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
break;
+ case TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to initialize I2C.");
+ break;
default:
dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
}
@@ -92,9 +95,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
struct drm_device *dev = adev_to_drm(adev);
uint32_t phy_id;
uint32_t op;
- int i;
char str[64];
- char i2c_output[256];
int ret;
if (*pos || size > sizeof(str) - 1)
@@ -136,11 +137,9 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
if (!ret) {
if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
- memset(i2c_output, 0, sizeof(i2c_output));
- for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
- sprintf(i2c_output, "%s 0x%X", i2c_output,
- securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
- dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output);
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is: %*ph\n",
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE,
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf);
} else {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
}
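The %*ph conversion used above is the kernel's built-in way to print a small buffer (up to 64 bytes) as space-separated hex, which is what makes the old sprintf accumulation loop unnecessary. A small sketch, assuming a struct device *dev is in scope and the buffer is illustrative:

u8 crc_buf[4] = { 0xde, 0xad, 0xbe, 0xef };

/* prints e.g. "crc: de ad be ef" */
dev_info(dev, "crc: %*ph\n", (int)sizeof(crc_buf), crc_buf);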
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index 03009157aec8..b860ec913ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -28,6 +28,8 @@ struct amdgpu_smuio_funcs {
u32 (*get_rom_data_offset)(struct amdgpu_device *adev);
void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable);
void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ u32 (*get_die_id)(struct amdgpu_device *adev);
+ bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
};
struct amdgpu_smuio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 7b230bcbf2c6..909d830b513e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -62,6 +62,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_create(adev, &bp, &vram_obj);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5efa331e3ee8..3bef0432cac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -57,14 +56,15 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -179,54 +179,11 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
- *
- * @bo: The bo to assign the memory to.
- * @mm_node: Memory manager node for drm allocator.
- * @mem: The region where the bo resides.
- *
- */
-static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_resource *mem)
-{
- uint64_t addr = 0;
-
- if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
- addr = mm_node->start << PAGE_SHIFT;
- addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
- mem->mem_type);
- }
- return addr;
-}
-
-/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
- * @offset. It also modifies the offset to be within the drm_mm_node returned
- *
- * @mem: The region where the bo resides.
- * @offset: The offset that drm_mm_node is used for finding.
- *
- */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
- uint64_t *offset)
-{
- struct drm_mm_node *mm_node = mem->mm_node;
-
- while (*offset >= (mm_node->size << PAGE_SHIFT)) {
- *offset -= (mm_node->size << PAGE_SHIFT);
- ++mm_node;
- }
- return mm_node;
-}
-
-/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @bo: buffer object to map
* @mem: memory object to map
- * @mm_node: drm_mm node object to map
+ * @mm_cur: range to map
* @num_pages: number of pages to map
- * @offset: offset into @mm_node where to start
* @window: which GART window to use
* @ring: DMA ring to use for the copy
* @tmz: if we should setup a TMZ enabled mapping
@@ -237,10 +194,10 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
- struct drm_mm_node *mm_node,
- unsigned num_pages, uint64_t offset,
- unsigned window, struct amdgpu_ring *ring,
- bool tmz, uint64_t *addr)
+ struct amdgpu_res_cursor *mm_cur,
+ unsigned num_pages, unsigned window,
+ struct amdgpu_ring *ring, bool tmz,
+ uint64_t *addr)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -257,14 +214,15 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
- *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+ mm_cur->start;
return 0;
}
*addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- *addr += offset & ~PAGE_MASK;
+ *addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
@@ -292,17 +250,17 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- dma_addr_t *dma_address;
+ dma_addr_t *dma_addr;
- dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
cpu_addr);
if (r)
goto error_free;
} else {
dma_addr_t dma_address;
- dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address = mm_cur->start;
dma_address += adev->vm_manager.vram_base_offset;
for (i = 0; i < num_pages; ++i) {
@@ -354,9 +312,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE);
- uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *src_mm, *dst_mm;
+ struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
@@ -365,29 +322,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
return -EINVAL;
}
- src_offset = src->offset;
- if (src->mem->mm_node) {
- src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
- src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- } else {
- src_mm = NULL;
- src_node_size = ULLONG_MAX;
- }
-
- dst_offset = dst->offset;
- if (dst->mem->mm_node) {
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
- } else {
- dst_mm = NULL;
- dst_node_size = ULLONG_MAX;
- }
+ amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+ amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
mutex_lock(&adev->mman.gtt_window_lock);
-
- while (size) {
- uint32_t src_page_offset = src_offset & ~PAGE_MASK;
- uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
+ while (src_mm.remaining) {
+ uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
struct dma_fence *next;
uint32_t cur_size;
uint64_t from, to;
@@ -396,19 +337,19 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
* begins at an offset, then adjust the size accordingly
*/
cur_size = max(src_page_offset, dst_page_offset);
- cur_size = min(min3(src_node_size, dst_node_size, size),
+ cur_size = min(min3(src_mm.size, dst_mm.size, size),
(uint64_t)(GTT_MAX_BYTES - cur_size));
/* Map src to window 0 and dst to window 1. */
- r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
PFN_UP(cur_size + src_page_offset),
- src_offset, 0, ring, tmz, &from);
+ 0, ring, tmz, &from);
if (r)
goto error;
- r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
PFN_UP(cur_size + dst_page_offset),
- dst_offset, 1, ring, tmz, &to);
+ 1, ring, tmz, &to);
if (r)
goto error;
@@ -420,27 +361,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
dma_fence_put(fence);
fence = next;
- size -= cur_size;
- if (!size)
- break;
-
- src_node_size -= cur_size;
- if (!src_node_size) {
- ++src_mm;
- src_node_size = src_mm->size << PAGE_SHIFT;
- src_offset = 0;
- } else {
- src_offset += cur_size;
- }
-
- dst_node_size -= cur_size;
- if (!dst_node_size) {
- ++dst_mm;
- dst_node_size = dst_mm->size << PAGE_SHIFT;
- dst_offset = 0;
- } else {
- dst_offset += cur_size;
- }
+ amdgpu_res_next(&src_mm, cur_size);
+ amdgpu_res_next(&dst_mm, cur_size);
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -519,7 +441,8 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ struct amdgpu_res_cursor cursor;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -527,12 +450,13 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
if (mem->mem_type != TTM_PL_VRAM)
return false;
+ amdgpu_res_first(mem, 0, mem_size, &cursor);
+
/* ttm_resource_ioremap only supports contiguous memory */
- if (nodes->size != mem->num_pages)
+ if (cursor.size != mem_size)
return false;
- return ((nodes->start + nodes->size) << PAGE_SHIFT)
- <= adev->gmc.visible_vram_size;
+ return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
}
/*
@@ -646,7 +570,7 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
@@ -674,7 +598,10 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
- mem->bus.caching = ttm_write_combined;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ mem->bus.caching = ttm_cached;
+ else
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -686,12 +613,10 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- uint64_t offset = (page_offset << PAGE_SHIFT);
- struct drm_mm_node *mm;
+ struct amdgpu_res_cursor cursor;
- mm = amdgpu_find_mm_node(&bo->mem, &offset);
- offset += adev->gmc.aper_base;
- return mm->start + (offset >> PAGE_SHIFT);
+ amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
/**
@@ -893,16 +818,15 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int r;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ int r;
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
@@ -931,18 +855,17 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */
@@ -1015,7 +938,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1155,20 +1078,20 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (!gtt->bound)
- return;
-
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ if (!gtt->bound)
+ return;
+
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return;
@@ -1180,7 +1103,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1157,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1278,7 +1201,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1430,6 +1353,10 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
flags |= AMDGPU_PTE_SNOOPED;
}
+ if (mem && mem->mem_type == TTM_PL_VRAM &&
+ mem->bus.caching == ttm_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+
return flags;
}
@@ -1469,7 +1396,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
unsigned long num_pages = bo->mem.num_pages;
- struct drm_mm_node *node = bo->mem.mm_node;
+ struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
@@ -1501,13 +1428,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- while (num_pages) {
- if (place->fpfn < (node->start + node->size) &&
- !(place->lpfn && place->lpfn <= node->start))
+ amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ &cursor);
+ while (cursor.remaining) {
+ if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
+ && !(place->lpfn &&
+ place->lpfn <= PFN_DOWN(cursor.start)))
return true;
- num_pages -= node->size;
- ++node;
+ amdgpu_res_next(&cursor, cursor.size);
}
return false;
@@ -1531,41 +1460,36 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* access for debugging purposes.
*/
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
- unsigned long offset,
- void *buf, int len, int write)
+ unsigned long offset, void *buf, int len,
+ int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes;
+ struct amdgpu_res_cursor cursor;
+ unsigned long flags;
uint32_t value = 0;
int ret = 0;
- uint64_t pos;
- unsigned long flags;
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- pos = offset;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
- pos += (nodes->start << PAGE_SHIFT);
-
- while (len && pos < adev->gmc.mc_vram_size) {
- uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint64_t bytes = 4 - (pos & 3);
- uint32_t shift = (pos & 3) * 8;
+ amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ while (cursor.remaining) {
+ uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+ uint64_t bytes = 4 - (cursor.start & 3);
+ uint32_t shift = (cursor.start & 3) * 8;
uint32_t mask = 0xffffffff << shift;
- if (len < bytes) {
- mask &= 0xffffffff >> (bytes - len) * 8;
- bytes = len;
+ if (cursor.size < bytes) {
+ mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+ bytes = cursor.size;
}
if (mask != 0xffffffff) {
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
@@ -1577,21 +1501,15 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
memcpy(buf, &value, bytes);
}
} else {
- bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
- bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-
- amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
- bytes, write);
+ bytes = cursor.size & ~0x3ULL;
+ amdgpu_device_vram_access(adev, cursor.start,
+ (uint32_t *)buf, bytes,
+ write);
}
ret += bytes;
buf = (uint8_t *)buf + bytes;
- pos += bytes;
- len -= bytes;
- if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
- ++nodes;
- pos = (nodes->start << PAGE_SHIFT);
- }
+ amdgpu_res_next(&cursor, bytes);
}
return ret;
@@ -1603,7 +1521,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1696,7 +1614,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-
+
DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
ctx->train_data_size,
ctx->p2c_train_data_offset,
@@ -1785,7 +1703,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No other users of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1812,8 +1730,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
- adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
- adev->gmc.visible_vram_size);
+#ifdef CONFIG_X86
+ if (adev->gmc.xgmi.connected_to_cpu)
+ adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+
+ else
+#endif
+ adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
#endif
/*
@@ -1926,7 +1851,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
- ttm_bo_device_release(&adev->mman.bdev);
+ ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2002,7 +1927,7 @@ unlock:
return ret;
}
-static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
.fault = amdgpu_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
@@ -2053,7 +1978,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
return r;
if (vm_needs_flush) {
- job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+ adev->gmc.pdb0_bo : adev->gart.bo);
job->vm_needs_flush = true;
}
if (resv) {
@@ -2104,9 +2030,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *mm_node;
- unsigned long num_pages;
+ struct amdgpu_res_cursor cursor;
unsigned int num_loops, num_dw;
+ uint64_t num_bytes;
struct amdgpu_job *job;
int r;
@@ -2122,15 +2048,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return r;
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
+ num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
num_loops = 0;
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
+ amdgpu_res_next(&cursor, cursor.size);
}
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
@@ -2152,27 +2076,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
+ amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
+ uint64_t dst_addr = cursor.start;
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- uint64_t dst_addr;
+ dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+ cur_size);
- dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
- while (byte_count) {
- uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
- max_bytes);
-
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
-
- dst_addr += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
- }
-
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_next(&cursor, cur_size);
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -2191,36 +2104,74 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = (uintptr_t)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
return 0;
}
-static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}
-static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
- {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
- {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
- {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
- {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
- {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
-};
+static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_TT);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GDS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GWS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_OA);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
/*
* amdgpu_ttm_vram_read - Linear read access to VRAM
@@ -2308,58 +2259,6 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek,
};
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-
-/*
- * amdgpu_ttm_gtt_read - Linear read access to GTT memory
- */
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- while (size) {
- loff_t p = *pos / PAGE_SIZE;
- unsigned off = *pos & ~PAGE_MASK;
- size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
- struct page *page;
- void *ptr;
-
- if (p >= adev->gart.num_cpu_pages)
- return result;
-
- page = adev->gart.pages[p];
- if (page) {
- ptr = kmap(page);
- ptr += off;
-
- r = copy_to_user(buf, ptr, cur_size);
- kunmap(adev->gart.pages[p]);
- } else
- r = clear_user(buf, cur_size);
-
- if (r)
- return -EFAULT;
-
- result += cur_size;
- buf += cur_size;
- *pos += cur_size;
- size -= cur_size;
- }
-
- return result;
-}
-
-static const struct file_operations amdgpu_ttm_gtt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_ttm_gtt_read,
- .llseek = default_llseek
-};
-
-#endif
-
/*
* amdgpu_iomem_read - Virtual read access to GPU mapped memory
*
@@ -2474,46 +2373,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = {
.llseek = default_llseek
};
-static const struct {
- char *name;
- const struct file_operations *fops;
- int domain;
-} ttm_debugfs_entries[] = {
- { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
- { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
-};
-#endif
-
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned count;
-
struct drm_minor *minor = adev_to_drm(adev)->primary;
- struct dentry *ent, *root = minor->debugfs_root;
-
- for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
- ent = debugfs_create_file(
- ttm_debugfs_entries[count].name,
- S_IFREG | S_IRUGO, root,
- adev,
- ttm_debugfs_entries[count].fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
- i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
- else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
- i_size_write(ent->d_inode, adev->gmc.gart_size);
- adev->mman.debugfs_entries[count] = ent;
- }
-
- count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
- return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
- return 0;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+ &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+ debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+ &amdgpu_ttm_iomem_fops);
+ debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+ &amdgpu_mm_vram_table_fops);
+ debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+ &amdgpu_mm_tt_table_fops);
+ debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+ &amdgpu_mm_gds_table_fops);
+ debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+ &amdgpu_mm_gws_table_fops);
+ debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+ &amdgpu_mm_oa_table_fops);
+ debugfs_create_file("ttm_page_pool", 0444, root, adev,
+ &amdgpu_ttm_page_pool_fops);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d2987536d7cd..dec0db8b0b13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -60,14 +60,10 @@ struct amdgpu_gtt_mgr {
};
struct amdgpu_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
bool initialized;
void __iomem *aper_base_kaddr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_entries[8];
-#endif
-
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
@@ -119,8 +115,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
-void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
- struct device *dev,
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
@@ -186,6 +181,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_resource *mem);
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 1beb08af347f..9733224117e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -402,6 +402,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_ALDEBARAN:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 46449e70348b..2c42874f7784 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -105,6 +105,9 @@ struct psp_firmware_header_v1_3 {
uint32_t spl_header_version;
uint32_t spl_offset_bytes;
uint32_t spl_size_bytes;
+ uint32_t rl_header_version;
+ uint32_t rl_offset_bytes;
+ uint32_t rl_size_bytes;
};
/* version_major=1, version_minor=0 */
@@ -136,6 +139,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
TA_FW_TYPE_PSP_SECUREDISPLAY,
+ TA_FW_TYPE_MAX_INDEX,
};
struct ta_fw_bin_desc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index a2975c8092a9..ea6f99be070b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -60,8 +60,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
}
/* ras init of specific umc version */
- if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
- adev->umc.funcs->err_cnt_init(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->err_cnt_init)
+ adev->umc.ras_funcs->err_cnt_init(adev);
return 0;
@@ -95,12 +96,12 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs &&
- adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
- if (adev->umc.funcs &&
- adev->umc.funcs->query_ras_error_address &&
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -116,7 +117,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
}
/* only uncorrectable error needs gpu reset */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 183814493658..bbcccf53080d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -35,13 +35,17 @@
#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
-struct amdgpu_umc_funcs {
+struct amdgpu_umc_ras_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
+ void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+};
+
+struct amdgpu_umc_funcs {
void (*init_registers)(struct amdgpu_device *adev);
};
@@ -59,6 +63,7 @@ struct amdgpu_umc {
struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
+ const struct amdgpu_umc_ras_funcs *ras_funcs;
};
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e2ed4689118a..c6dbc0801604 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
version_major, version_minor);
} else {
unsigned int enc_major, enc_minor, dec_minor;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 99b82f3c2617..201645963ba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -47,6 +47,7 @@
#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
+#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -54,6 +55,7 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
+MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -104,6 +106,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case CHIP_ALDEBARAN:
+ fw_name = FIRMWARE_ALDEBARAN;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
case CHIP_NAVI10:
fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 13aa417f6be7..bc76cab67697 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -155,6 +155,7 @@
} \
} while (0)
+#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
@@ -211,6 +212,7 @@ struct amdgpu_vcn_inst {
void *saved_bo;
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ atomic_t sched_score;
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
@@ -243,6 +245,12 @@ struct amdgpu_vcn {
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_rb_ptrs_struct {
+	/* to work around DPG R/W pointer issues. */
+ uint32_t rptr;
+ uint32_t wptr;
+};
+
struct amdgpu_fw_shared_multi_queue {
uint8_t decode_queue_mode;
uint8_t encode_generalpurpose_queue_mode;
@@ -258,10 +266,12 @@ struct amdgpu_fw_shared_sw_ring {
struct amdgpu_fw_shared {
uint32_t present_flag_0;
- uint8_t pad[53];
+ uint8_t pad[44];
+ struct amdgpu_fw_shared_rb_ptrs_struct rb;
+ uint8_t pad1[1];
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
-} __attribute__((__packed__));
+};
struct amdgpu_vcn_decode_buffer {
uint32_t valid_buf_flag;
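The pad[53] -> pad[44] change above carves exactly nine bytes out of the old padding for the new eight-byte rb block plus pad1[1], so the offsets of multi_queue and sw_ring are unchanged and __attribute__((__packed__)) can be dropped (rb lands at the naturally aligned offset 48). A hedged compile-time sketch of that accounting, assuming amdgpu_fw_shared_multi_queue contains only byte-sized members as the visible fields suggest; it is not part of the patch:

	#include <linux/build_bug.h>

	static inline void example_fw_shared_layout_check(void)
	{
		/* 4 (present_flag_0) + 44 (pad) + 8 (rb) + 1 (pad1) == 4 + 53 */
		BUILD_BUG_ON(offsetof(struct amdgpu_fw_shared, multi_queue) != 57);
	}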
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 5da04d45b637..0c9c5255aa42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -466,6 +466,8 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
adev->virt.gim_feature =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+ adev->virt.reg_access =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
break;
default:
@@ -617,6 +619,14 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
+ } else if (adev->bios != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+ return;
}
if (adev->virt.vf2pf_update_interval_ms != 0) {
@@ -640,6 +650,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
break;
default: /* other chip doesn't support SRIOV */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8dd624c20f89..383d4bdc3fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -104,6 +104,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
/* PP ONE VF MODE in GIM */
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+ /* Indirect Reg Access enabled */
+ AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+};
+
+enum AMDGIM_REG_ACCESS_FLAG {
+ /* Use PSP to program IH_RB_CNTL */
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ /* Use RLC to program MMHUB regs */
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ /* Use RLC to program GC regs */
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
};
struct amdgim_pf2vf_info_v1 {
@@ -217,6 +228,7 @@ struct amdgpu_virt {
bool tdr_debug;
struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
bool ras_init_done;
+ uint32_t reg_access;
/* vf2pf message */
struct delayed_work vf2pf_work;
@@ -238,6 +250,22 @@ struct amdgpu_virt {
#define amdgpu_sriov_fullaccess(adev) \
(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+#define amdgpu_sriov_reg_indirect_en(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))
+
+#define amdgpu_sriov_reg_indirect_ih(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))
+
+#define amdgpu_sriov_reg_indirect_mmhub(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_gc(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
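The new reg_access word and the amdgpu_sriov_reg_indirect_*() macros above let a VF choose its register programming path per block, as the gfx_v10_0.c hunks later in this diff do for GC and MMHUB. A hedged usage sketch for the IH case (example_psp_program_ih_rb_cntl is a placeholder, not a real symbol):

	void example_program_ih_rb_cntl(struct amdgpu_device *adev, u32 val)
	{
		if (amdgpu_sriov_reg_indirect_ih(adev))
			example_psp_program_ih_rb_cntl(adev, val);	/* PSP-assisted path */
		else
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, val);	/* direct MMIO */
	}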
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 326dae31b675..0ffdf847cad0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
mutex_lock(&vm->eviction_lock);
- vm->saved_flags = memalloc_nofs_save();
+ vm->saved_flags = memalloc_noreclaim_save();
}
static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
if (mutex_trylock(&vm->eviction_lock)) {
- vm->saved_flags = memalloc_nofs_save();
+ vm->saved_flags = memalloc_noreclaim_save();
return 1;
}
return 0;
@@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
- memalloc_nofs_restore(vm->saved_flags);
+ memalloc_noreclaim_restore(vm->saved_flags);
mutex_unlock(&vm->eviction_lock);
}
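The switch above from memalloc_nofs_save() to memalloc_noreclaim_save() replaces the filesystem-reentry guard with the stronger PF_MEMALLOC scope, so allocations made while holding the eviction lock will not recurse into direct reclaim at all. A generic sketch of the scoped pattern, not specific to this driver:

	#include <linux/sched/mm.h>

	unsigned int noreclaim_flags;

	noreclaim_flags = memalloc_noreclaim_save();
	/* allocations in this scope get PF_MEMALLOC semantics: no recursion into direct reclaim */
	memalloc_noreclaim_restore(noreclaim_flags);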
@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&adev->mman.bdev.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&adev->mman.bdev.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&adev->mman.bdev.lru_lock);
vm->bulk_moveable = true;
}
@@ -869,6 +869,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo);
if (vm->use_cpu_for_update)
bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index c89b66bb70e2..592a2dd16493 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -52,7 +52,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}
/**
@@ -69,7 +69,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}
/**
@@ -87,8 +87,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
}
/**
@@ -106,8 +105,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_vis_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
}
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -119,27 +117,27 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
switch (adev->gmc.vram_vendor) {
case SAMSUNG:
- return snprintf(buf, PAGE_SIZE, "samsung\n");
+ return sysfs_emit(buf, "samsung\n");
case INFINEON:
- return snprintf(buf, PAGE_SIZE, "infineon\n");
+ return sysfs_emit(buf, "infineon\n");
case ELPIDA:
- return snprintf(buf, PAGE_SIZE, "elpida\n");
+ return sysfs_emit(buf, "elpida\n");
case ETRON:
- return snprintf(buf, PAGE_SIZE, "etron\n");
+ return sysfs_emit(buf, "etron\n");
case NANYA:
- return snprintf(buf, PAGE_SIZE, "nanya\n");
+ return sysfs_emit(buf, "nanya\n");
case HYNIX:
- return snprintf(buf, PAGE_SIZE, "hynix\n");
+ return sysfs_emit(buf, "hynix\n");
case MOSEL:
- return snprintf(buf, PAGE_SIZE, "mosel\n");
+ return sysfs_emit(buf, "mosel\n");
case WINBOND:
- return snprintf(buf, PAGE_SIZE, "winbond\n");
+ return sysfs_emit(buf, "winbond\n");
case ESMT:
- return snprintf(buf, PAGE_SIZE, "esmt\n");
+ return sysfs_emit(buf, "esmt\n");
case MICRON:
- return snprintf(buf, PAGE_SIZE, "micron\n");
+ return sysfs_emit(buf, "micron\n");
default:
- return snprintf(buf, PAGE_SIZE, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
}
@@ -639,15 +637,13 @@ error_free:
/**
* amdgpu_vram_mgr_free_sgt - free a sg table
*
- * @adev: amdgpu device pointer
* @dev: device pointer
* @dir: data direction of resource to unmap
* @sgt: sg table to free
*
* Free a previously allocated sg table.
*/
-void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
- struct device *dev,
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 659b385b27b5..8567d5d77346 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,7 +24,6 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
-#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
@@ -217,7 +216,7 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}
@@ -246,7 +245,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
- return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
+ return sysfs_emit(buf, "%u\n", error_count);
}
@@ -468,15 +467,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
}
+/*
+ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
+ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
+ * num_hops[5:3] = reserved
+ * num_hops[2:0] = number of hops
+ */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ uint8_t num_hops_mask = 0x7;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_hops;
+ return top->nodes[i].num_hops & num_hops_mask;
return -EINVAL;
}
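Per the num_hops bit layout documented in the new comment above, the hop count is the low three bits and the link type the top two. A small decode sketch consistent with that comment (names are illustrative only):

	static inline unsigned int example_xgmi_hops(uint8_t num_hops)
	{
		return num_hops & 0x7;		/* num_hops[2:0] */
	}

	static inline unsigned int example_xgmi_link_type(uint8_t num_hops)
	{
		return (num_hops >> 6) & 0x3;	/* 0 = xGMI2, 1 = xGMI3 */
	}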
@@ -492,7 +498,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.supported)
return 0;
- if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (!adev->gmc.xgmi.pending_reset &&
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp);
if (ret) {
dev_err(adev->dev,
@@ -538,7 +545,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
task_barrier_add_task(&hive->tb);
- if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (!adev->gmc.xgmi.pending_reset &&
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
if (tmp_adev != adev) {
@@ -567,7 +575,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- if (!ret)
+ if (!ret && !adev->gmc.xgmi.pending_reset)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
exit_unlock:
@@ -620,7 +628,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return psp_xgmi_terminate(&adev->psp);
}
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
int r;
struct ras_ih_if ih_info = {
@@ -634,7 +642,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -656,7 +664,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
return r;
}
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
adev->gmc.xgmi.ras_if) {
@@ -683,7 +691,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg
WREG32_PCIE(pcs_status_reg, 0);
}
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
uint32_t i;
@@ -743,8 +751,8 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
return 0;
}
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
@@ -793,10 +801,17 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
return 0;
}
+
+const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
+ .ras_late_init = amdgpu_xgmi_ras_late_init,
+ .ras_fini = amdgpu_xgmi_ras_fini,
+ .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+ .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 148560d63554..12969c0830d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -50,6 +50,7 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
+extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
@@ -58,14 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status);
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
-
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 5355827ed0ae..1a8f6d4baab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -90,11 +90,22 @@ union amd_sriov_msg_feature_flags {
uint32_t host_flr_vramlost : 1;
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
- uint32_t reserved : 27;
+ uint32_t reg_indirect_acc : 1;
+ uint32_t reserved : 26;
} flags;
uint32_t all;
};
+union amd_sriov_reg_access_flags {
+ struct {
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t reserved : 29;
+ } flags;
+ uint32_t all;
+};
+
union amd_sriov_msg_os_info {
struct {
uint32_t windows : 1;
@@ -149,8 +160,10 @@ struct amd_sriov_msg_pf2vf_info {
/* identification in ROCm SMI */
uint64_t uuid;
uint32_t fcn_idx;
+	/* flags which indicate the register access method the VF should use */
+ union amd_sriov_reg_access_flags reg_access_flags;
/* reserved */
- uint32_t reserved[256-26];
+ uint32_t reserved[256-27];
};
struct amd_sriov_msg_vf2pf_info_header {
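The reserved array above shrinks from 256-26 to 256-27 dwords because the new reg_access_flags union occupies exactly one dword, leaving the overall PF2VF message size unchanged. A hedged compile-time sketch of that accounting (not part of the patch):

	#include <linux/build_bug.h>

	static_assert(sizeof(union amd_sriov_reg_access_flags) == sizeof(uint32_t),
		      "reg_access_flags must stay one dword so reserved[] shrinks by exactly one");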
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 7b1b18350bf9..2ac4988ea0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -74,10 +74,8 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
- athub_v2_1_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- athub_v2_1_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
+ athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 515890f4f5a0..3dcb8b32f48b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -114,11 +114,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base++;
break;
case ATOM_IIO_READ:
- temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
- ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+ ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index 4205bbe5d8d7..d279759cab47 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -116,8 +116,6 @@ struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4d6832cc7fb0..c0fcc41ee574 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -70,6 +72,80 @@
#include "amdgpu_amdkfd.h"
#include "dce_virtual.h"
+static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_encode_array),
+ .codec_array = cik_video_codecs_encode_array,
+};
+
+static const struct amdgpu_video_codec_info cik_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 41,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_decode_array),
+ .codec_array = cik_video_codecs_decode_array,
+};
+
+static int cik_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ if (encode)
+ *codecs = &cik_video_codecs_encode;
+ else
+ *codecs = &cik_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
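The new cik_query_video_codecs() above reports fixed decode/encode capability tables through the .query_video_codecs callback wired into cik_asic_funcs further down in this file. A hedged caller sketch going through that function pointer (error handling reduced to the minimum):

	const struct amdgpu_video_codecs *codecs;
	int r;

	r = adev->asic_funcs->query_video_codecs(adev, false /* decode */, &codecs);
	if (!r)
		DRM_INFO("%u decode codecs reported\n", codecs->codec_count);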
@@ -1933,6 +2009,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_pcie_replay_count = &cik_get_pcie_replay_count,
.supports_baco = &cik_asic_supports_baco,
.pre_asic_init = &cik_pre_asic_init,
+ .query_video_codecs = &cik_query_video_codecs,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 43b978144b79..c4bb8eed246d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -984,10 +984,9 @@ static int cik_sdma_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 19abb740a169..d1570a462a51 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1862,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1981,8 +1980,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 320ec35bfd37..18a7b3bd633b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1904,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2023,8 +2022,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 13322000ebd6..dbcb09cf83e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1929,8 +1928,8 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 04ebf02e5b8c..b200b9e722d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1791,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1902,8 +1901,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 6b4b30a8dce5..0d8459d63bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -205,7 +205,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
count++;
}
- return snprintf(buf, PAGE_SIZE, "%i\n", count);
+ return sysfs_emit(buf, "%i\n", count);
}
/* device attr for available perfmon counters */
@@ -568,6 +568,8 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret)
return ret;
+ df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
if (is_remove) {
df_v3_6_reset_perfmon_cntr(adev, config, counter_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
index e9f177e9e3cf..e9f177e9e3cf 100755..100644
--- a/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 63691deb7df3..2408ed4c7d84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -29,7 +29,6 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"
@@ -174,6 +173,11 @@
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
+#define GFX_RLCG_GC_WRITE (0x0 << 28)
+#define GFX_RLCG_GC_READ (0x1 << 28)
+#define GFX_RLCG_MMHUB_WRITE (0x2 << 28)
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -1419,38 +1423,127 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
+{
+ /* always programed by rlcg, only for gc */
+ if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
+ if (!amdgpu_sriov_reg_indirect_gc(adev))
+ *flag = GFX_RLCG_GC_WRITE_OLD;
+ else
+ *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+ return true;
+ }
+
+	/* currently supports GC read/write and MMHUB write */
+ if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
+ offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
+ if (amdgpu_sriov_reg_indirect_gc(adev))
+ *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+ else
+ return false;
+ } else {
+ if (amdgpu_sriov_reg_indirect_mmhub(adev))
+ *flag = GFX_RLCG_MMHUB_WRITE;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
uint32_t i = 0;
uint32_t retries = 50000;
+ u32 ret = 0;
+
+ scratch_reg0 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+ scratch_reg1 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+ scratch_reg2 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+ scratch_reg3 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ writel(v, scratch_reg0);
+ writel(offset | flag, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & flag))
+ break;
- scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
- scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
- spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+ udelay(10);
+ }
- if (amdgpu_sriov_runtime(adev)) {
- pr_err("shouldn't call rlcg write register during runtime\n");
- return;
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
}
- writel(v, scratch_reg0);
- writel(offset | 0x80000000, scratch_reg1);
- writel(1, spare_int);
- for (i = 0; i < retries; i++) {
- u32 tmp;
+ ret = readl(scratch_reg0);
- tmp = readl(scratch_reg1);
- if (!(tmp & 0x80000000))
- break;
+ return ret;
+}
+
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+{
+ uint32_t rlcg_flag;
- udelay(10);
+ if (amdgpu_sriov_fullaccess(adev) &&
+ gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+ gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+
+ return;
}
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, value);
+ else
+ WREG32(offset, value);
+}
- if (i >= retries)
- pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+{
+ uint32_t rlcg_flag;
+
+ if (amdgpu_sriov_fullaccess(adev) &&
+ gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+ return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
+
+ return 0;
}
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -4459,9 +4552,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
return 0;
@@ -4495,8 +4587,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
if (r)
return r;
@@ -7172,16 +7264,10 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded firstly, so in direct type, it has to load smc ucode
* here before rlc.
*/
- if (adev->smu.ppt_funcs != NULL && !(adev->flags & AMD_IS_APU)) {
- r = smu_load_microcode(&adev->smu);
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_pm_load_smu_firmware(adev, NULL);
if (r)
return r;
-
- r = smu_check_fw_status(&adev->smu);
- if (r) {
- pr_err("SMC firmware status is not correct\n");
- return r;
- }
}
gfx_v10_0_disable_gpa_mode(adev);
}
@@ -7892,6 +7978,7 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
+ .rlcg_rreg = gfx_v10_rlcg_rreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index ca74638dec9b..3a8d52a54873 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3114,7 +3114,7 @@ static int gfx_v6_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -3137,7 +3137,7 @@ static int gfx_v6_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a368724c3dfc..c35fdd2ef2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
* is not used by default on newer asics (r6xx+). On newer asics,
* memory buffers are used for fences rather than scratch regs.
*/
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
*
* @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
* @ib: amdgpu indirect buffer object
* @flags: options (AMDGPU_HAVE_CTX_SWITCH)
*
* Emits an DE (drawing engine) or CE (constant engine) IB
* on the gfx ring. IBs are usually generated by userspace
* acceleration drivers and submitted to the kernel for
- * sheduling on the ring. This function schedules the IB
+ * scheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -2402,7 +2402,7 @@ err1:
/*
* CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
*
* GFX
* Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
/**
* gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
*
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
*
* Sync the command pipeline with the PFP. E.g. wait for everything
* to be completed.
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
+ /* sync CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -4438,7 +4438,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -4512,7 +4512,7 @@ static int gfx_v7_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 84d2eaa38101..c26e06059466 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1927,8 +1927,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
if (r)
return r;
@@ -2033,7 +2033,7 @@ static int gfx_v8_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -6718,7 +6718,8 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
return 0;
}
-static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
+ bool from_wq)
{
u32 enc, se_id, sh_id, cu_id;
char type[20];
@@ -6756,7 +6757,7 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
* or from BH in which case we can access SQ_EDC_INFO
* instance
*/
- if (in_task()) {
+ if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
@@ -6794,7 +6795,7 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
struct sq_work *sq_work = container_of(work, struct sq_work, work);
- gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}
static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
@@ -6809,7 +6810,7 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
* just print whatever info is possible directly from the ISR.
*/
if (work_pending(&adev->gfx.sq_work.work)) {
- gfx_v8_0_parse_sq_irq(adev, ih_data);
+ gfx_v8_0_parse_sq_irq(adev, ih_data, false);
} else {
adev->gfx.sq_work.ih_data = ih_data;
schedule_work(&adev->gfx.sq_work.work);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 65db88bb6cbc..06811a1f4625 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -49,6 +49,7 @@
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -107,14 +108,12 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
-MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
-MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
@@ -124,6 +123,10 @@ MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -731,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -784,6 +787,20 @@ static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
}
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+ if (amdgpu_sriov_fullaccess(adev)) {
+ gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+
+ return;
+ }
+
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, v);
+ else
+ WREG32(offset, v);
+}
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -981,11 +998,16 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common goldensetting */
+ case CHIP_ALDEBARAN:
+ gfx_v9_4_2_init_golden_registers(adev,
+ adev->smuio.funcs->get_die_id(adev));
+ break;
default:
break;
}
- if (adev->asic_type != CHIP_ARCTURUS)
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ (adev->asic_type != CHIP_ALDEBARAN))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -1517,6 +1539,16 @@ out:
return err;
}
+static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_ALDEBARAN ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_RENOIR)
+ return false;
+
+ return true;
+}
+
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
@@ -1538,21 +1570,23 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
- } else {
- err = 0;
- adev->gfx.mec2_fw = NULL;
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (!err) {
+ err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ } else {
+ err = 0;
+ adev->gfx.mec2_fw = NULL;
+ }
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -1581,8 +1615,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_RENOIR) {
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1642,6 +1675,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "green_sardine";
break;
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
default:
BUG();
}
@@ -1882,7 +1918,10 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- return 5;
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev))
+ return 5;
+ else
+ return 4;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
@@ -2064,30 +2103,22 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+};
+
+static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
-static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
- .ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
-};
-
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -2114,6 +2145,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
DRM_INFO("fix gfx.config for vega12\n");
break;
case CHIP_VEGA20:
+ adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2139,7 +2171,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARCTURUS:
- adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
+ adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2159,6 +2191,21 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
+ case CHIP_ALDEBARAN:
+ adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
+ /* check vbios table if gpu info is not available */
+ err = amdgpu_atomfirmware_get_gfx_info(adev);
+ if (err)
+ return err;
+ break;
default:
BUG();
break;
@@ -2231,8 +2278,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- return amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
}
static int gfx_v9_0_sw_init(void *handle)
@@ -2249,6 +2296,7 @@ static int gfx_v9_0_sw_init(void *handle)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -2320,10 +2368,9 @@ static int gfx_v9_0_sw_init(void *handle)
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq,
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -2378,7 +2425,9 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_gfx_ras_fini(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_fini)
+ adev->gfx.ras_funcs->ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2634,17 +2683,15 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
{
u32 tmp;
- /* don't toggle interrupts that are only applicable
- * to me0 pipe0 on AISCs that have me0 removed */
- if (!adev->gfx.num_gfx_rings)
- return;
+ /* These interrupts should be enabled to drive DS clock */
tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
+	if (adev->gfx.num_gfx_rings)
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
@@ -3696,11 +3743,18 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
+ struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
- if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+	/* The GPU could be in a bad state during probe and the driver may
+	 * trigger a reset after loading the SMU; in that case the MQD has
+	 * not been initialized and needs to be re-initialized.  Check
+	 * mqd->cp_hqd_pq_control, since this value should not be 0.
+	 */
+	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
+		/* for the GPU_RESET case, reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3736,8 +3790,15 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
+ struct v9_mqd *tmp_mqd;
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+	/* Same as the KIQ init above: the driver needs to re-init the MQD
+	 * if mqd->cp_hqd_pq_control has not been initialized before.
+	 */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+
+ if (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3913,6 +3974,9 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ gfx_v9_4_2_set_power_brake_sequence(adev);
+
return r;
}
@@ -3954,8 +4018,14 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- adev->gfx.rlc.funcs->stop(adev);
+ /* Skip suspend with A+A reset */
+ if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+ dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
return 0;
}
@@ -4101,7 +4171,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev)))
goto failed_kiq_read;
might_sleep();
@@ -4486,7 +4556,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
@@ -4636,7 +4707,8 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
@@ -4662,7 +4734,8 @@ static int gfx_v9_0_ecc_late_init(void *handle)
* to GDS in suspend/resume sequence on several cards. So just
* limit this operation in cold boot sequence.
*/
- if (!adev->in_suspend) {
+ if ((!adev->in_suspend) &&
+ (adev->gds.gds_size)) {
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
@@ -4673,13 +4746,16 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- if (adev->gfx.funcs &&
- adev->gfx.funcs->reset_ras_error_count)
- adev->gfx.funcs->reset_ras_error_count(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_late_init) {
+ r = adev->gfx.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
- r = amdgpu_gfx_ras_late_init(adev);
- if (r)
- return r;
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->enable_watchdog_timer)
+ adev->gfx.ras_funcs->enable_watchdog_timer(adev);
return 0;
}
@@ -4858,7 +4934,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (!adev->gfx.num_gfx_rings)
return;
amdgpu_gfx_rlc_enter_safe_mode(adev);
@@ -5105,6 +5181,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -6924,6 +7001,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -6944,6 +7022,12 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->gds.gds_size = 0x1000;
break;
+ case CHIP_ALDEBARAN:
+ /* aldebaran removed all of the GDS internal memory;
+ * only GWS opcodes, such as barrier and semaphore,
+ * are supported in the kernel */
+ adev->gds.gds_size = 0;
+ break;
default:
adev->gds.gds_size = 0x10000;
break;
@@ -6966,6 +7050,10 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
+ case CHIP_ALDEBARAN:
+ /* deprecated for Aldebaran, no usage at all */
+ adev->gds.gds_compute_max_wave_id = 0;
+ break;
default:
/* this really depends on the chip */
adev->gds.gds_compute_max_wave_id = 0x7ff;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index bc699d680ce8..830080ff90d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -863,8 +863,8 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
@@ -906,7 +906,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
@@ -971,7 +971,8 @@ void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
@@ -996,7 +997,7 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
uint32_t i, j;
uint32_t reg_value;
@@ -1021,3 +1022,12 @@ void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 875f18473a98..bdd16b568021 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -24,16 +24,6 @@
#ifndef __GFX_V9_4_H__
#define __GFX_V9_4_H__
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
-
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status);
-
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
- void *inject_if);
-
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
-
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
new file mode 100644
index 000000000000..9ca76a3ac38c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -0,0 +1,1297 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+#include "gfx_v9_0.h"
+
+#include "gfx_v9_4_2.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_gfx.h"
+
+enum gfx_v9_4_2_utc_type {
+ VML2_MEM,
+ VML2_WALKER_MEM,
+ UTCL2_MEM,
+ ATC_L2_CACHE_2M,
+ ATC_L2_CACHE_32K,
+ ATC_L2_CACHE_4K
+};
+
+struct gfx_v9_4_2_utc_block {
+ enum gfx_v9_4_2_utc_type type;
+ uint32_t num_banks;
+ uint32_t num_ways;
+ uint32_t num_mem_blocks;
+ struct soc15_reg idx_reg;
+ struct soc15_reg data_reg;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+ uint32_t clear;
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x141dc920),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0x3b458b93),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x1a4f5583),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0x317717f6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x107cc1e6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x351),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x2591aa38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0xac9e88b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x2bc3369b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0xfb74ee),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x21f0a2fe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x49),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_UTCL1_CNTL1, 0xffffffff, 0x30800400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
+};
+
+static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev);
+static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev);
+
+void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
+ uint32_t die_id)
+{
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde));
+
+ /* apply golden settings per die */
+ switch (die_id) {
+ case 0:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde_die_0,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_0));
+ break;
+ case 1:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde_die_1,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_1));
+ break;
+ default:
+ dev_warn(adev->dev,
+ "invalid die id %d, ignore channel fabricid remap settings\n",
+ die_id);
+ break;
+ }
+
+ return;
+}
+
+void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid,
+ uint32_t last_vmid)
+{
+ uint32_t data;
+ int i;
+
+ mutex_lock(&adev->srbm_mutex);
+
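+ /* enable the trap handler (TRAP_EN) for every VMID in [first_vmid, last_vmid) */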
+ for (i = first_vmid; i < last_vmid; i++) {
+ data = 0;
+ soc15_grbm_select(adev, 0, 0, 0, i);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
+ 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
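+ /* broadcast to all SEs/SHs/instances before programming the throttle and DIDT power-brake registers */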
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
+ WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL, tmp);
+
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1);
+ WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp);
+
+ WREG32_SOC15(GC, 0, regDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, DIDT_SQ_THROTTLE_CTRL, PWRBRK_STALL_EN, 1);
+ WREG32_SOC15(GC, 0, regDIDT_IND_DATA, tmp);
+
+ WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12);
+ WREG32_SOC15(GC, 0, regGC_CAC_IND_DATA, tmp);
+}
+
+static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] = {
+ /* CPF */
+ { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), 0, 1, 1 },
+ /* CPC */
+ { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), 0, 1, 1 },
+ /* GDS */
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+ /* RLC */
+ { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), 0, 1, 1 },
+ /* SPI */
+ { SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), 0, 8, 1 },
+ /* SQC */
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), 0, 8, 7 },
+ /* SQ */
+ { SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), 0, 8, 14 },
+ /* TCP */
+ { SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), 0, 8, 14 },
+ /* TCI */
+ { SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), 0, 1, 69 },
+ /* TCC */
+ { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), 0, 1, 16 },
+ /* TCA */
+ { SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), 0, 1, 2 },
+ /* TCX */
+ { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), 0, 1, 2 },
+ { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), 0, 1, 2 },
+ /* TD */
+ { SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), 0, 8, 14 },
+ /* TA */
+ { SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), 0, 8, 14 },
+ /* GCEA */
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 1, 16 },
+};
+
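+/* program GRBM_GFX_INDEX; an se/sh/instance value of 0xffffffff selects broadcast writes */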
+static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+ instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] = {
+ /* CPF */
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+ { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+ /* CPC */
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+ { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_RESTORE_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+ { "CPC_DC_RESTORE_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+ /* GDS */
+ { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+ { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+ { "GDS_ME0_GFXHP3D_PIX_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_PIX_DED) },
+ { "GDS_ME0_GFXHP3D_VTX_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_VTX_DED) },
+ { "GDS_ME0_CS_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_CS_DED) },
+ { "GDS_ME0_GFXHP3D_GS_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_GS_DED) },
+ { "GDS_ME1_PIPE0_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE0_DED) },
+ { "GDS_ME1_PIPE1_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE1_DED) },
+ { "GDS_ME1_PIPE2_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE2_DED) },
+ { "GDS_ME1_PIPE3_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE3_DED) },
+ { "GDS_ME2_PIPE0_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE0_DED) },
+ { "GDS_ME2_PIPE1_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE1_DED) },
+ { "GDS_ME2_PIPE2_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE2_DED) },
+ { "GDS_ME2_PIPE3_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE3_DED) },
+
+ /* RLC */
+ { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+ { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+ { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+ { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+ { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+ { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+ { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+
+ /* SPI */
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+ { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+ { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+ { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT */
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU3_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU3_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT2 */
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT3 */
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_PARITY_CNT3 */
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_DED_COUNT) },
+
+ /* SQ */
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+ /* TCP */
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+ /* TCI */
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+ /* TCC */
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+
+ /* TCA */
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+ /* TCX */
+ { "TCX_GROUP0", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_DED_COUNT) },
+ { "TCX_GROUP1", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_DED_COUNT) },
+ { "TCX_GROUP2", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_DED_COUNT) },
+ { "TCX_GROUP3", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_DED_COUNT) },
+ { "TCX_GROUP4", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_DED_COUNT) },
+ { "TCX_GROUP5", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP5_SED_COUNT), 0, 0 },
+ { "TCX_GROUP6", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP6_SED_COUNT), 0, 0 },
+ { "TCX_GROUP7", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP7_SED_COUNT), 0, 0 },
+ { "TCX_GROUP8", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP8_SED_COUNT), 0, 0 },
+ { "TCX_GROUP9", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP9_SED_COUNT), 0, 0 },
+ { "TCX_GROUP10", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP10_SED_COUNT), 0, 0 },
+ { "TCX_GROUP11", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP11_SED_COUNT), 0, 0 },
+ { "TCX_GROUP12", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP12_SED_COUNT), 0, 0 },
+ { "TCX_GROUP13", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP13_SED_COUNT), 0, 0 },
+ { "TCX_GROUP14", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP14_SED_COUNT), 0, 0 },
+
+ /* TD */
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+ /* TA */
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO_LO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_DED_COUNT) },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO_HI", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_DED_COUNT) },
+
+ /* EA - regGCEA_EDC_CNT */
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_DED_COUNT) },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+
+ /* EA - regGCEA_EDC_CNT2 */
+ { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+ { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+ { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+ { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+
+ /* EA - regGCEA_EDC_CNT3 */
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+ { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+ { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+ { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+ { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+};
+
+static const char * const vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_ARADDRS",
+ "UTC_VML2_RDIF_LOG_FIFO",
+ "UTC_VML2_QUEUE_REQ",
+ "UTC_VML2_QUEUE_RET",
+};
+
+static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
+ { VML2_MEM, 8, 2, 2,
+ { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, VML2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { VML2_WALKER_MEM, ARRAY_SIZE(vml2_walker_mems), 1, 1,
+ { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, VML2_WALKER_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { UTCL2_MEM, 18, 1, 2,
+ { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, UTCL2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_2M, 8, 2, 1,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_2M_DSM_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_32K, 8, 2, 2,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_32K_DSM_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_4K, 8, 2, 8,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
+};
+
+static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs =
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 };
+
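+/* match the given counter register against the RAS field table and accumulate any SEC/DED counts found in value */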
+static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id,
+ uint32_t value, uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_ras_fields); i++) {
+ if (gfx_v9_4_2_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_4_2_ras_fields[i].seg != reg->seg ||
+ gfx_v9_4_2_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = SOC15_RAS_REG_FIELD_VAL(
+ value, gfx_v9_4_2_ras_fields[i], sec);
+ if (sec_cnt) {
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = SOC15_RAS_REG_FIELD_VAL(
+ value, gfx_v9_4_2_ras_fields[i], ded);
+ if (ded_cnt) {
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
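+/* walk every EDC counter register across all SE/instance combinations;
+ * when sec_count/ded_count are NULL the counters are only cleared */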
+static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i, j, k, data;
+ uint32_t sec_cnt = 0, ded_cnt = 0;
+
+ if (sec_count && ded_count) {
+ *sec_count = 0;
+ *ded_count = 0;
+ }
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_2_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_2_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_2_select_se_sh(adev, j, 0, k);
+
+ /* if sec/ded_count is null, just clear counter */
+ if (!sec_count || !ded_count) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]), 0);
+ continue;
+ }
+
+ data = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]));
+
+ if (!data)
+ continue;
+
+ gfx_v9_4_2_get_reg_error_count(adev,
+ &gfx_v9_4_2_edc_counter_regs[i],
+ j, k, data, &sec_cnt, &ded_cnt);
+
+ /* clear counter after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]), 0);
+ }
+ }
+ }
+
+ if (sec_count && ded_count) {
+ *sec_count += sec_cnt;
+ *ded_count += ded_cnt;
+ }
+
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
+ struct gfx_v9_4_2_utc_block *blk,
+ uint32_t instance, uint32_t sec_cnt,
+ uint32_t ded_cnt)
+{
+ uint32_t bank, way, mem;
+ static const char *vml2_way_str[] = { "BIGK", "4K" };
+ static const char *utcl2_rounter_str[] = { "VMC", "APT" };
+
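+ /* decode the linear instance index into (bank, way, mem) coordinates */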
+ mem = instance % blk->num_mem_blocks;
+ way = (instance / blk->num_mem_blocks) % blk->num_ways;
+ bank = instance / (blk->num_mem_blocks * blk->num_ways);
+
+ switch (blk->type) {
+ case VML2_MEM:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_VML2_BANK_CACHE_%d_%s_MEM%d, SED %d, DED %d\n",
+ bank, vml2_way_str[way], mem, sec_cnt, ded_cnt);
+ break;
+ case VML2_WALKER_MEM:
+ dev_info(adev->dev, "GFX SubBlock %s, SED %d, DED %d\n",
+ vml2_walker_mems[bank], sec_cnt, ded_cnt);
+ break;
+ case UTCL2_MEM:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
+ bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_2M:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_2M_BANK%d_WAY%d_MEM, SED %d, DED %d\n",
+ bank, way, sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_32K:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_32K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
+ bank, way, mem, sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_4K:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_4K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
+ bank, way, mem, sec_cnt, ded_cnt);
+ break;
+ }
+}
+
+static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i, j, data;
+ uint32_t sec_cnt, ded_cnt;
+ uint32_t num_instances;
+ struct gfx_v9_4_2_utc_block *blk;
+
+ if (sec_count && ded_count) {
+ *sec_count = 0;
+ *ded_count = 0;
+ }
+
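+ /* walk each UTC block and read/clear every bank/way/mem instance through its index/data register pair */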
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_utc_blocks); i++) {
+ blk = &gfx_v9_4_2_utc_blocks[i];
+ num_instances =
+ blk->num_banks * blk->num_ways * blk->num_mem_blocks;
+ for (j = 0; j < num_instances; j++) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->idx_reg), j);
+
+ /* if sec/ded_count is NULL, just clear counter */
+ if (!sec_count || !ded_count) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
+ blk->clear);
+ continue;
+ }
+
+ data = RREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg));
+ if (!data)
+ continue;
+
+ sec_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, sec);
+ *sec_count += sec_cnt;
+ ded_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, ded);
+ *ded_count += ded_cnt;
+
+ /* clear counter after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
+ blk->clear);
+
+ /* print the edc count */
+ gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
+ ded_cnt);
+ }
+ }
+
+ return 0;
+}
+
+int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ gfx_v9_4_2_query_sram_edc_count(adev, &sec_count, &ded_count);
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_4_2_query_utc_edc_count(adev, &sec_count, &ded_count);
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ return 0;
+}
+
+static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
+}
+
+static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ j++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_query_sram_edc_count(adev, NULL, NULL);
+ gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
+}
+
+int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index = info->head.sub_block_index;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
+
+static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+ uint32_t reg_value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ j++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_rdrsp_status_regs));
+ if (reg_value)
+ dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+ j, reg_value);
+ /* clear after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ }
+ }
+
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
+ }
+}
+
+void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_query_ea_err_status(adev);
+ gfx_v9_4_2_query_utc_err_status(adev);
+ gfx_v9_4_2_query_sq_timeout_status(adev);
+}
+
+void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_reset_utc_err_status(adev);
+ gfx_v9_4_2_reset_ea_err_status(adev);
+ gfx_v9_4_2_reset_sq_timeout_status(adev);
+}
+
+void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
+{
+ uint32_t i;
+ uint32_t data;
+
+ data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ amdgpu_watchdog_timer.timeout_fatal_disable ? 1 :
+ 0);
+
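+ /* when fatal timeouts are disabled, force the period into the valid PERIOD_SEL range (1..0x23) */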
+ if (amdgpu_watchdog_timer.timeout_fatal_disable &&
+ (amdgpu_watchdog_timer.period < 1 ||
+ amdgpu_watchdog_timer.period > 0x23)) {
+ dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
+ amdgpu_watchdog_timer.period = 0x23;
+ }
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
+ amdgpu_watchdog_timer.period);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_CONFIG, data);
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
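+/* read a per-wave SQ register indirectly through SQ_IND_INDEX/SQ_IND_DATA */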
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+ WREG32_SOC15_RLC_EX(reg, GC, 0, regSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+}
+
+static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ uint32_t i, simd, wave;
+ uint32_t wave_status;
+ uint32_t wave_pc_lo, wave_pc_hi;
+ uint32_t wave_exec_lo, wave_exec_hi;
+ uint32_t wave_inst_dw0, wave_inst_dw1;
+ uint32_t wave_ib_sts;
+
+ for (i = 0; i < 32; i++) {
+ if (!((1 << i) & status))
+ continue;
+
+ simd = i / cu_info->max_waves_per_simd;
+ wave = i % cu_info->max_waves_per_simd;
+
+ wave_status = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ wave_pc_lo = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ wave_pc_hi = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ wave_exec_lo =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ wave_exec_hi =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ wave_inst_dw0 =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ wave_inst_dw1 =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ wave_ib_sts = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+
+ dev_info(
+ adev->dev,
+ "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
+ simd, wave, wave_status,
+ ((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
+ ((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
+ ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
+ wave_ib_sts);
+ }
+}
+
+static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+ uint32_t status;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
+ se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
+ sh_idx++) {
+ for (cu_idx = 0;
+ cu_idx < adev->gfx.config.max_cu_per_sh;
+ cu_idx++) {
+ gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx);
+ status = RREG32_SOC15(GC, 0,
+ regSQ_TIMEOUT_STATUS);
+ if (status != 0) {
+ dev_info(
+ adev->dev,
+ "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
+ se_idx, sh_idx, cu_idx);
+ gfx_v9_4_2_log_cu_timeout_status(
+ adev, status);
+ }
+ /* clear old status */
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
+ se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
+ sh_idx++) {
+ for (cu_idx = 0;
+ cu_idx < adev->gfx.config.max_cu_per_sh;
+ cu_idx++) {
+ gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx);
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
+ .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+ .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+ .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+};
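For reference, a minimal userspace sketch of the wave-bit decode performed by gfx_v9_4_2_log_cu_timeout_status(); the status word and the max_waves_per_simd value of 10 are invented for illustration, and this is not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical SQ_TIMEOUT_STATUS bitmap: waves at bits 0, 3 and 10 */
        uint32_t status = 0x00000409;
        /* hypothetical CU configuration (cu_info->max_waves_per_simd) */
        uint32_t max_waves_per_simd = 10;
        uint32_t i;

        for (i = 0; i < 32; i++) {
                if (!((1u << i) & status))
                        continue;
                /* same split as the driver: bit index -> (SIMD, wave slot) */
                printf("SIMD %u, wave %u timed out\n",
                       i / max_waves_per_simd, i % max_waves_per_simd);
        }
        return 0;
}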
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
new file mode 100644
index 000000000000..81c5833b6b9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_2_H__
+#define __GFX_V9_4_2_H__
+
+void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid, uint32_t last_vmid);
+void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
+ uint32_t die_id);
+void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
+
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+
+#endif /* __GFX_V9_4_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ddd53ba8b77..d189507dcef0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -53,19 +53,39 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ uint64_t pt_base;
- gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.gart_start >> 44));
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
+ /* If GART is used for FB translation, the vmid0 page table covers both
+ * vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ } else {
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -116,6 +136,27 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
+
+ /* In the case of squeezing vram into the GART aperture, we don't use
+ * the FB aperture and AGP aperture. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ } else {
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
+ }
}
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -173,8 +214,13 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
@@ -184,7 +230,10 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index c0ab71df0d90..8fca72ebd11c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -28,13 +28,42 @@
#include "soc15_common.h"
+#define mmMC_VM_XGMI_LFB_CNTL_ALDE 0x0978
+#define mmMC_VM_XGMI_LFB_CNTL_ALDE_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE_ALDE 0x0979
+#define mmMC_VM_XGMI_LFB_SIZE_ALDE_BASE_IDX 0
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION_MASK 0x0000000FL
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION_MASK 0x000000F0L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE_MASK 0x0001FFFFL
+
int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
{
- u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
- u32 max_region =
+ u32 max_num_physical_nodes;
+ u32 max_physical_node_id;
+ u32 xgmi_lfb_cntl;
+ u32 max_region;
+ u64 seg_size;
+
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL_ALDE);
+ seg_size = REG_GET_FIELD(
+ RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE),
+ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ } else {
+ xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
+ seg_size = REG_GET_FIELD(
+ RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
+ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ }
+
+ max_region =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
- u32 max_num_physical_nodes = 0;
- u32 max_physical_node_id = 0;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
@@ -45,23 +74,30 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
max_num_physical_nodes = 8;
max_physical_node_id = 7;
break;
+ case CHIP_ALDEBARAN:
+ /* just using duplicates for Aldebaran support, revisit later */
+ max_num_physical_nodes = 8;
+ max_physical_node_id = 7;
+ break;
default:
return -EINVAL;
}
/* PF_MAX_REGION=0 means xgmi is disabled */
- if (max_region) {
+ if (max_region || adev->gmc.xgmi.connected_to_cpu) {
adev->gmc.xgmi.num_physical_nodes = max_region + 1;
+
if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
return -EINVAL;
adev->gmc.xgmi.physical_node_id =
- REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+ PF_LFB_REGION);
+
if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
return -EINVAL;
- adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
- RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
- MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+
+ adev->gmc.xgmi.node_segment_size = seg_size;
}
return 0;
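For reference, a small standalone sketch of the mask/shift extraction that REG_GET_FIELD() performs on MC_VM_XGMI_LFB_CNTL, using the Aldebaran PF_LFB_REGION/PF_MAX_REGION values added in this hunk; the register readback value is invented for illustration and this is not driver code:

#include <stdint.h>
#include <stdio.h>

#define PF_LFB_REGION_MASK      0x0000000FUL
#define PF_LFB_REGION_SHIFT     0x0
#define PF_MAX_REGION_MASK      0x000000F0UL
#define PF_MAX_REGION_SHIFT     0x4

int main(void)
{
        uint32_t xgmi_lfb_cntl = 0x31;  /* hypothetical readback */
        uint32_t lfb_region = (xgmi_lfb_cntl & PF_LFB_REGION_MASK) >> PF_LFB_REGION_SHIFT;
        uint32_t max_region = (xgmi_lfb_cntl & PF_MAX_REGION_MASK) >> PF_MAX_REGION_SHIFT;

        /* PF_MAX_REGION + 1 nodes in the hive, this GPU sits in PF_LFB_REGION */
        printf("physical_node_id=%u num_physical_nodes=%u\n",
               lfb_region, max_region + 1);
        return 0;
}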
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3b7c6c31fce1..2bfd620576f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -113,7 +113,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Delegate it to a different ring if the hardware hasn't
* already done it.
*/
- if (in_interrupt()) {
+ if (entry->ih == &adev->irq.ih) {
amdgpu_irq_delegate(adev, entry, 8);
return 1;
}
@@ -152,8 +152,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
- addr, entry->client_id);
+ dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
+ addr, entry->client_id,
+ soc15_ih_clientid_name[entry->client_id]);
if (!amdgpu_sriov_vf(adev))
hub->vmhub_funcs->print_l2_protection_fault_status(adev,
@@ -654,7 +655,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v8_7_funcs;
+ adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index f5b69484c45a..405d6ad09022 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -346,6 +346,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index dee2b34effb6..210ada2289ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -414,6 +414,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2d832fc23119..c1bd190841f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -599,6 +599,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3686e777c76c..c82d82da2c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -50,6 +50,7 @@
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
+#include "mmhub_v1_7.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
@@ -279,6 +280,47 @@ static const char *mmhub_client_ids_arcturus[][2] = {
[384][1] = "OSS",
};
+static const char *mmhub_client_ids_aldebaran[][2] = {
+ [2][0] = "MP1",
+ [3][0] = "MP0",
+ [32+1][0] = "DBGU_IO0",
+ [32+2][0] = "DBGU_IO2",
+ [32+4][0] = "MPIO",
+ [96+11][0] = "JPEG0",
+ [96+12][0] = "VCN0",
+ [96+13][0] = "VCNU0",
+ [128+11][0] = "JPEG1",
+ [128+12][0] = "VCN1",
+ [128+13][0] = "VCNU1",
+ [160+1][0] = "XDP",
+ [160+14][0] = "HDP",
+ [256+0][0] = "SDMA0",
+ [256+1][0] = "SDMA1",
+ [256+2][0] = "SDMA2",
+ [256+3][0] = "SDMA3",
+ [256+4][0] = "SDMA4",
+ [384+0][0] = "OSS",
+ [2][1] = "MP1",
+ [3][1] = "MP0",
+ [32+1][1] = "DBGU_IO0",
+ [32+2][1] = "DBGU_IO2",
+ [32+4][1] = "MPIO",
+ [96+11][1] = "JPEG0",
+ [96+12][1] = "VCN0",
+ [96+13][1] = "VCNU0",
+ [128+11][1] = "JPEG1",
+ [128+12][1] = "VCN1",
+ [128+13][1] = "VCNU1",
+ [160+1][1] = "XDP",
+ [160+14][1] = "HDP",
+ [256+0][1] = "SDMA0",
+ [256+1][1] = "SDMA1",
+ [256+2][1] = "SDMA2",
+ [256+3][1] = "SDMA3",
+ [256+4][1] = "SDMA4",
+ [384+0][1] = "OSS",
+};
+
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
@@ -484,7 +526,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Delegate it to a different ring if the hardware hasn't
* already done it.
*/
- if (in_interrupt()) {
+ if (entry->ih == &adev->irq.ih) {
amdgpu_irq_delegate(adev, entry, 8);
return 1;
}
@@ -520,8 +562,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
- addr, entry->client_id);
+ dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
+ addr, entry->client_id,
+ soc15_ih_clientid_name[entry->client_id]);
if (amdgpu_sriov_vf(adev))
return 0;
@@ -568,6 +611,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
case CHIP_RENOIR:
mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break;
+ case CHIP_ALDEBARAN:
+ mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
+ break;
default:
mmhub_cid = NULL;
break;
@@ -607,7 +653,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
adev->gmc.ecc_irq.num_types = 1;
adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
@@ -642,6 +689,9 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ return false;
+
return ((vmhub == AMDGPU_MMHUB_0 ||
vmhub == AMDGPU_MMHUB_1) &&
(!amdgpu_sriov_vf(adev)) &&
@@ -1033,10 +1083,14 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (adev->asic_type == CHIP_ARCTURUS &&
+ if ((adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) &&
!(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED;
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ *flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -1102,7 +1156,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v6_1_funcs;
+ adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
case CHIP_ARCTURUS:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1110,7 +1164,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v6_1_funcs;
+ adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
default:
break;
@@ -1123,12 +1177,33 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->mmhub.funcs = &mmhub_v9_4_funcs;
break;
+ case CHIP_ALDEBARAN:
+ adev->mmhub.funcs = &mmhub_v1_7_funcs;
+ break;
default:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
}
}
+static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+ break;
+ case CHIP_ARCTURUS:
+ adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+ break;
+ case CHIP_ALDEBARAN:
+ adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+ break;
+ default:
+ /* mmhub ras is not available */
+ break;
+ }
+}
+
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1138,10 +1213,21 @@ static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS)
+ adev->gmc.xgmi.supported = true;
+
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ adev->gmc.xgmi.supported = true;
+ adev->gmc.xgmi.connected_to_cpu =
+ adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
+ }
+
gmc_v9_0_set_gmc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
gmc_v9_0_set_umc_funcs(adev);
gmc_v9_0_set_mmhub_funcs(adev);
+ gmc_v9_0_set_mmhub_ras_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1174,8 +1260,9 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
- adev->mmhub.funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
r = amdgpu_gmc_ras_late_init(adev);
if (r)
@@ -1194,9 +1281,13 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc);
- amdgpu_gmc_agp_location(adev, mc);
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ amdgpu_gmc_sysvm_location(adev, mc);
+ } else {
+ amdgpu_gmc_vram_location(adev, mc, base);
+ amdgpu_gmc_gart_location(adev, mc);
+ amdgpu_gmc_agp_location(adev, mc);
+ }
/* base offset of vram pages */
adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
@@ -1223,7 +1314,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
r = amdgpu_device_resize_fb_bar(adev);
if (r)
return r;
@@ -1232,10 +1324,28 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
- adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ /*
+ * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
+ * interface can use VRAM through here, as it appears as system
+ * reserved memory in the host address space.
+ *
+ * For APUs, VRAM is just the stolen system memory and can be accessed
+ * directly.
+ *
+ * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
+ */
+
+ /* check whether both host-gpu and gpu-gpu xgmi links exist */
+ if ((adev->flags & AMD_IS_APU) ||
+ (adev->gmc.xgmi.supported &&
+ adev->gmc.xgmi.connected_to_cpu)) {
+ adev->gmc.aper_base =
+ adev->gfxhub.funcs->get_mc_fb_offset(adev) +
+ adev->gmc.xgmi.physical_node_id *
+ adev->gmc.xgmi.node_segment_size;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
+
#endif
/* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -1249,6 +1359,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case CHIP_VEGA12: /* all engines support GPUVM */
case CHIP_VEGA20:
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -1261,6 +1372,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
+
gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
return 0;
@@ -1274,6 +1387,15 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
WARN(1, "VEGA10 PCIE GART already initialized\n");
return 0;
}
+
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ adev->gmc.vmid0_page_table_depth = 1;
+ adev->gmc.vmid0_page_table_block_size = 12;
+ } else {
+ adev->gmc.vmid0_page_table_depth = 0;
+ adev->gmc.vmid0_page_table_block_size = 0;
+ }
+
/* Initialize common gart structure */
r = amdgpu_gart_init(adev);
if (r)
@@ -1281,7 +1403,16 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
adev->gart.table_size = adev->gart.num_gpu_pages * 8;
adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
AMDGPU_PTE_EXECUTABLE;
- return amdgpu_gart_table_vram_alloc(adev);
+
+ r = amdgpu_gart_table_vram_alloc(adev);
+ if (r)
+ return r;
+
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ r = amdgpu_gmc_pdb0_alloc(adev);
+ }
+
+ return r;
}
/**
@@ -1352,6 +1483,7 @@ static int gmc_v9_0_sw_init(void *handle)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->num_vmhubs = 2;
@@ -1395,7 +1527,8 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
/* interrupt sent to DF. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
&adev->gmc.ecc_irq);
@@ -1448,7 +1581,8 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing.
*/
adev->vm_manager.first_kfd_vmid =
- adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
+ (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;
amdgpu_vm_manager_init(adev);
@@ -1465,6 +1599,7 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_unref(&adev->gmc.pdb0_bo);
amdgpu_bo_fini(adev);
amdgpu_gart_fini(adev);
@@ -1525,10 +1660,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ amdgpu_gmc_init_pdb0(adev);
+
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
+
r = amdgpu_gart_table_vram_pin(adev);
if (r)
return r;
@@ -1541,9 +1680,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ DRM_INFO("PCIE GART of %uM enabled.\n",
+ (unsigned)(adev->gmc.gart_size >> 20));
+ if (adev->gmc.pdb0_bo)
+ DRM_INFO("PDB0 located at 0x%016llX\n",
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
+ DRM_INFO("PTB located at 0x%016llX\n",
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+
adev->gart.ready = true;
return 0;
}
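For reference, a small sketch of how the hub code splits a page-table aperture address across the two 32-bit registers (address >> 12 into *_LO32, address >> 44 into *_HI32), as done for VM_CONTEXT0_PAGE_TABLE_START/END_ADDR in the gfxhub and mmhub changes; the address is made up for illustration and this is not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t fb_start = 0x0000800012345000ULL;      /* hypothetical aperture start */
        uint32_t lo = (uint32_t)(fb_start >> 12);       /* low 32 bits of the page number */
        uint32_t hi = (uint32_t)(fb_start >> 44);       /* remaining high bits */

        /* together the two registers encode page number 0x800012345 */
        printf("START_ADDR_LO32=0x%08x START_ADDR_HI32=0x%08x\n", lo, hi);
        return 0;
}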
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index e46621fed5b9..edbd35d293eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,6 +49,9 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ return;
+
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 7332a320ede8..9360204da7fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -487,7 +487,7 @@ int jpeg_v1_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 3b22953aa62e..de5abceced0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -108,7 +108,7 @@ static int jpeg_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c6724a0e0c43..83531997aeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -115,7 +115,7 @@ static int jpeg_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -565,6 +565,26 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.set_powergating_state = jpeg_v2_5_set_powergating_state,
};
+static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
+ .name = "jpeg_v2_6",
+ .early_init = jpeg_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_5_sw_init,
+ .sw_fini = jpeg_v2_5_sw_fini,
+ .hw_init = jpeg_v2_5_hw_init,
+ .hw_fini = jpeg_v2_5_hw_fini,
+ .suspend = jpeg_v2_5_suspend,
+ .resume = jpeg_v2_5_resume,
+ .is_idle = jpeg_v2_5_is_idle,
+ .wait_for_idle = jpeg_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
@@ -595,6 +615,36 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -602,8 +652,10 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
-
- adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
adev->jpeg.inst[i].ring_dec.me = i;
DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
}
@@ -635,3 +687,12 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
.rev = 0,
.funcs = &jpeg_v2_5_ip_funcs,
};
+
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 6,
+ .rev = 0,
+ .funcs = &jpeg_v2_6_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
index 2b4087c02620..3b0aa29b9879 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -25,5 +25,6 @@
#define __JPEG_V2_5_H__
extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
+extern const struct amdgpu_ip_block_version jpeg_v2_6_ip_block;
#endif /* __JPEG_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index e8fbb2a0de34..de5dfcfb3859 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -94,7 +94,7 @@ static int jpeg_v3_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 7f30629f21a2..a7ec4ac89da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -848,7 +848,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
+ return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
}
static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index d7b39c07de20..aa9be5612c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -776,10 +776,14 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.get_fb_location = mmhub_v1_0_get_fb_location,
.init = mmhub_v1_0_init,
.gart_enable = mmhub_v1_0_gart_enable,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index d77f5b65a618..4661b094e007 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,5 +24,6 @@
#define __MMHUB_V1_0_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
new file mode 100644
index 000000000000..7977a7879b32
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -0,0 +1,1333 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "mmhub_v1_7.h"
+
+#include "mmhub/mmhub_1_7_offset.h"
+#include "mmhub/mmhub_1_7_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+
+#define regVM_L2_CNTL3_DEFAULT 0x80100007
+#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+
+static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
+{
+ u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
+ u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
+
+ base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+ top <<= 24;
+
+ adev->gmc.fb_start = base;
+ adev->gmc.fb_end = top;
+ adev->gmc.fb_start_original = base;
+ adev->gmc.fb_end_original = top;
+
+ return base;
+}
+
+static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
+}
+
+static void mmhub_v1_7_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base;
+
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_7_setup_vm_pt_regs(adev, 0, pt_base);
+
+ /* If GART is used for FB translation, the vmid0 page table covers both
+ * vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ } else {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
+}
+
+static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+ uint32_t tmp;
+
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* In the case of squeezing vram into the GART aperture, we don't use
+ * the FB aperture and AGP aperture. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ }
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+}
+
+static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ /* XXX for emulation, Refer to closed source code.*/
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp);
+}
+
+static void mmhub_v1_7_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp);
+}
+
+static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(MMHUB, 0,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
+ WREG32_SOC15(MMHUB, 0,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
+ 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
+ 0);
+}
+
+static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned num_level, block_size;
+ uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !adev->gmc.noretry);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+}
+
+static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned i;
+
+ for (i = 0; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+ * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+ * VF copy registers, so the vbios post doesn't program them;
+ * for SRIOV the driver needs to program them
+ */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ mmhub_v1_7_init_gart_aperture_regs(adev);
+ mmhub_v1_7_init_system_aperture_regs(adev);
+ mmhub_v1_7_init_tlb_regs(adev);
+ mmhub_v1_7_init_cache_regs(adev);
+
+ mmhub_v1_7_enable_system_domain(adev);
+ mmhub_v1_7_disable_identity_aperture(adev);
+ mmhub_v1_7_setup_vmid_config(adev);
+ mmhub_v1_7_program_invalidation(adev);
+
+ return 0;
+}
+
+static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL,
+ 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0);
+ }
+}
+
+/**
+ * mmhub_v1_7_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+{
+ u32 tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp,
+ VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static void mmhub_v1_7_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+}
+
+static void mmhub_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data, def1, data1, def2 = 0, data2 = 0;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2);
+
+ if (enable) {
+ data |= ATC_L2_MISC_CG__ENABLE_MASK;
+
+ data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+ data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ } else {
+ data &= ~ATC_L2_MISC_CG__ENABLE_MASK;
+
+ data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+ data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2, data1);
+
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2, data2);
+}
+
+static void mmhub_v1_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ if (enable)
+ data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+}
+
+static int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ /* Change state only if MCCG support is enabled through driver */
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
+ mmhub_v1_7_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+
+ /* Change state only if LS support is enabled through driver */
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
+ mmhub_v1_7_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+ int data, data1;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+ !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
+}
+
+static const struct soc15_ras_field_entry mmhub_v1_7_ras_fields[] = {
+ /* MMHUB Range 0 */
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 1 */
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 2 */
+ { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 3 */
+ { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 4 */
+ { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 5 */
+ { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+};
+
+static const struct soc15_reg_entry mmhub_v1_7_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), 0, 0, 0 },
+};
+
+static int mmhub_v1_7_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ras_fields); i++) {
+ if (mmhub_v1_7_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v1_7_ras_fields[i].sec_count_mask) >>
+ mmhub_v1_7_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v1_7_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v1_7_ras_fields[i].ded_count_mask) >>
+ mmhub_v1_7_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
+ mmhub_v1_7_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void mmhub_v1_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v1_7_get_ras_error_count(adev, &mmhub_v1_7_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+}
+
+static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* write 0 to reset the edc counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++)
+ WREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i]), 0);
+ }
+}
+
+static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i]));
+ if (reg_value)
+ dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+ i, reg_value);
+ }
+}
+
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
+ .query_ras_error_count = mmhub_v1_7_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
+ .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+ .get_fb_location = mmhub_v1_7_get_fb_location,
+ .init = mmhub_v1_7_init,
+ .gart_enable = mmhub_v1_7_gart_enable,
+ .set_fault_enable_default = mmhub_v1_7_set_fault_enable_default,
+ .gart_disable = mmhub_v1_7_gart_disable,
+ .set_clockgating = mmhub_v1_7_set_clockgating,
+ .get_clockgating = mmhub_v1_7_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs,
+};
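A minimal userspace sketch of the per-entry extraction done by mmhub_v1_7_get_ras_error_count() above: each soc15_ras_field_entry carries a mask/shift pair, and the SEC/DED counts are pulled out of the raw EDC_CNT readback with a mask-and-shift before being accumulated into ce_count/ue_count. The register value and the field layout below are hypothetical stand-ins, not the real MMEA0_EDC_CNT encoding.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0x00000009;                    /* hypothetical regMMEA0_EDC_CNT readback */
	uint32_t sec_mask = 0x00000003, sec_shift = 0;  /* assumed DRAMRD_CMDMEM SEC field */
	uint32_t ded_mask = 0x0000000c, ded_shift = 2;  /* assumed DRAMRD_CMDMEM DED field */

	uint32_t sec_cnt = (value & sec_mask) >> sec_shift; /* correctable, feeds ce_count */
	uint32_t ded_cnt = (value & ded_mask) >> ded_shift; /* uncorrectable, feeds ue_count */

	printf("SEC %u, DED %u\n", sec_cnt, ded_cnt);
	return 0;
}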
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
new file mode 100644
index 000000000000..a7f9dfc24697
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __MMHUB_V1_7_H__
+#define __MMHUB_V1_7_H__
+
+extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index f107385faba2..da7edd1ed6b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -689,7 +689,6 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
.init = mmhub_v2_0_init,
.gart_enable = mmhub_v2_0_gart_enable,
.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index ab9be5ad5a5f..1141c37432f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -616,7 +616,6 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
.init = mmhub_v2_3_init,
.gart_enable = mmhub_v2_3_gart_enable,
.set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 4a31737b6bb0..0cffa820ea6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1652,10 +1652,15 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+ .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.get_fb_location = mmhub_v9_4_get_fb_location,
.init = mmhub_v9_4_init,
.gart_enable = mmhub_v9_4_gart_enable,
@@ -1664,5 +1669,4 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.set_clockgating = mmhub_v9_4_set_clockgating,
.get_clockgating = mmhub_v9_4_get_clockgating,
.setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
- .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 92404a8f66f3..90436efa92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -24,5 +24,6 @@
#define __MMHUB_V9_4_H__
extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4bc1d1434065..af44aad78171 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -52,6 +52,20 @@
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
+#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+
+#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE 0x01d8
+#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX 2
+//BIF_MMSCH1_DOORBELL_ALDE_RANGE
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT 0x2
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT 0x10
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK 0x00000FFCL
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK 0x001F0000L
+
+#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
+#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -65,7 +79,12 @@ static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE);
+ else
+ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -92,10 +111,10 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
{
u32 reg, doorbell_range;
- if (instance < 2)
+ if (instance < 2) {
reg = instance +
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
- else
+ } else {
/*
* These registers address of SDMA2~7 is not consecutive
* from SDMA0~1. Need plus 4 dwords offset.
@@ -103,9 +122,19 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
- reg = instance + 0x4 +
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
+ if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
+ reg = instance + 0x4 + 0x1 +
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_SDMA0_DOORBELL_RANGE);
+ else
+ reg = instance + 0x4 +
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_SDMA0_DOORBELL_RANGE);
+ }
doorbell_range = RREG32(reg);
@@ -124,9 +153,12 @@ static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
u32 reg;
u32 doorbell_range;
- if (instance)
- reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
- else
+ if (instance) {
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE);
+ else
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
+ } else
reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
doorbell_range = RREG32(reg);
@@ -525,6 +557,16 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
+const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
+ .ras_fini = amdgpu_nbio_ras_fini,
+};
+
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -545,10 +587,4 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
- .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
- .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
- .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
- .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
- .query_ras_error_count = nbio_v7_4_query_ras_error_count,
- .ras_late_init = amdgpu_nbio_ras_late_init,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index b1ac82872752..b8216581ec8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
+extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
#endif
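The register index selection in nbio_v7_4_sdma_doorbell_range() above can be condensed into a small helper. This is only an illustrative sketch: base stands in for SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) and the Aldebaran check is reduced to a flag.

/* Sketch of the doorbell-range register index computed above; not driver code. */
static unsigned int sdma_doorbell_reg_index(unsigned int base, int instance,
					    int is_aldebaran)
{
	if (instance < 2)
		return base + instance;             /* SDMA0/1 are consecutive */
	if (is_aldebaran && instance == 4)
		return base + instance + 0x4 + 0x1; /* extra dword gap on Aldebaran */
	return base + instance + 0x4;               /* SDMA2~7 sit 4 dwords later */
}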
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index c625c5d8ed89..46d4bbabce75 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -32,7 +34,6 @@
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"
@@ -65,6 +66,184 @@
static const struct amd_ip_funcs nv_common_ip_funcs;
+/* Navi */
+static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs nv_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
+ .codec_array = nv_video_codecs_encode_array,
+};
+
+/* Navi1x */
+static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs nv_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
+ .codec_array = nv_video_codecs_decode_array,
+};
+
+/* Sienna Cichlid */
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
+ .codec_array = sc_video_codecs_decode_array,
+};
+
+static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode;
+ return 0;
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &nv_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -304,44 +483,6 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static int nv_asic_mode1_reset(struct amdgpu_device *adev)
-{
- u32 i;
- int ret = 0;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- /* disable BM */
- pci_clear_master(adev->pdev);
-
- amdgpu_device_cache_pci_state(adev->pdev);
-
- if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
- dev_info(adev->dev, "GPU smu mode1 reset\n");
- ret = amdgpu_dpm_mode1_reset(adev);
- } else {
- dev_info(adev->dev, "GPU psp mode1 reset\n");
- ret = psp_gpu_reset(adev);
- }
-
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
- amdgpu_device_load_pci_state(adev->pdev);
-
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
-
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
- return ret;
-}
-
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -374,21 +515,9 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
return ret;
}
-static bool nv_asic_supports_baco(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (smu_baco_is_support(smu))
- return true;
- else
- return false;
-}
-
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
-
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
@@ -407,7 +536,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
return AMD_RESET_METHOD_MODE1;
default:
- if (smu_baco_is_support(smu))
+ if (amdgpu_dpm_is_baco_supported(adev))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
@@ -417,11 +546,6 @@ nv_asic_reset_method(struct amdgpu_device *adev)
static int nv_asic_reset(struct amdgpu_device *adev)
{
int ret = 0;
- struct smu_context *smu = &adev->smu;
-
- /* skip reset on vangogh for now */
- if (adev->asic_type == CHIP_VANGOGH)
- return 0;
switch (nv_asic_reset_method(adev)) {
case AMD_RESET_METHOD_PCI:
@@ -430,13 +554,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
break;
case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
-
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
+ ret = amdgpu_dpm_baco_reset(adev);
break;
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
@@ -444,7 +562,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
break;
default:
dev_info(adev->dev, "MODE1 reset\n");
- ret = nv_asic_mode1_reset(adev);
+ ret = amdgpu_device_mode1_reset(adev);
break;
}
@@ -844,9 +962,10 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
- .supports_baco = &nv_asic_supports_baco,
+ .supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &nv_pre_asic_init,
.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
+ .query_video_codecs = &nv_query_video_codecs,
};
static int nv_common_early_init(void *handle)
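A hedged usage sketch of the query_video_codecs callback wired up in nv_asic_funcs above; the surrounding query plumbing and error handling are omitted, and the call is shown through the asic_funcs pointer rather than the static nv_query_video_codecs() itself.

/* Illustrative only: query the decode capability table for the current ASIC. */
const struct amdgpu_video_codecs *codecs = NULL;

if (adev->asic_funcs->query_video_codecs &&
    !adev->asic_funcs->query_video_codecs(adev, false, &codecs))
	DRM_INFO("supported decode codecs: %u\n", codecs->codec_count);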
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 3ba7bdfde65d..dd4d65f7e0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -102,6 +102,21 @@ enum psp_gfx_cmd_id
/* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */
GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */
GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
+ GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
+};
+
+/* PSP boot config sub-commands */
+enum psp_gfx_boot_config_cmd
+{
+ BOOTCFG_CMD_SET = 1, /* Set boot configuration settings */
+ BOOTCFG_CMD_GET = 2, /* Get boot configuration settings */
+ BOOTCFG_CMD_INVALIDATE = 3 /* Reset current boot configuration settings to VBIOS defaults */
+};
+
+/* PSP boot config bitmask values */
+enum psp_gfx_boot_config
+{
+ BOOT_CONFIG_GECC = 0x1,
};
/* Command to load Trusted Application binary into PSP OS. */
@@ -235,6 +250,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
+ GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */
GFX_FW_TYPE_MAX
};
@@ -272,6 +288,15 @@ struct psp_gfx_cmd_load_toc
uint32_t toc_size; /* FW buffer size in bytes */
};
+/* Dynamic boot configuration */
+struct psp_gfx_cmd_boot_cfg
+{
+ uint32_t timestamp; /* calendar time as number of seconds */
+ enum psp_gfx_boot_config_cmd sub_cmd; /* sub-command indicating how to process command data */
+ uint32_t boot_config; /* dynamic boot configuration bitmask */
+ uint32_t boot_config_valid; /* dynamic boot configuration valid bits bitmask */
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -284,6 +309,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_reg_prog cmd_setup_reg_prog;
struct psp_gfx_cmd_setup_tmr cmd_setup_vmr;
struct psp_gfx_cmd_load_toc cmd_load_toc;
+ struct psp_gfx_cmd_boot_cfg boot_cfg;
};
struct psp_gfx_uresp_reserved
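A minimal sketch of how the new boot-config command defined above might be populated for a GET query. Command-buffer acquisition and submission via GFX_CMD_ID_BOOT_CFG are assumed and not part of this header; only the fields declared in psp_gfx_cmd_boot_cfg are used.

/* Illustrative only: request the current boot configuration. */
static void fill_boot_cfg_get(union psp_gfx_commands *cmd)
{
	cmd->boot_cfg.sub_cmd = BOOTCFG_CMD_GET;  /* query, don't modify */
	cmd->boot_cfg.boot_config = 0;
	cmd->boot_cfg.boot_config_valid = 0;      /* no bits being written */
}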
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index c325d6f53a71..589410c32d09 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -598,7 +598,7 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
}
/*
- * save and restore proces
+ * save and restore process
*/
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
@@ -661,9 +661,9 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
/*
- * Long traing will encroach certain mount of bottom VRAM,
- * saving the content of this bottom VRAM to system memory
- * before training, and restoring it after training to avoid
+ * Long training will encroach a certain amount on the bottom of VRAM;
+ * save the content from the bottom of VRAM to system memory
+ * before training, and restore it after training to avoid
* VRAM corruption.
*/
sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
new file mode 100644
index 000000000000..fcdce46445d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v13_0.h"
+
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
+
+static int psp_v13_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ int err = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
+ default:
+ BUG();
+ }
+
+ err = psp_init_sos_microcode(psp, chip_name);
+ if (err)
+ return err;
+
+ err = psp_init_ta_microcode(&adev->psp, chip_name);
+
+ return err;
+}
+
+static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
+static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for bootloader to signify that it is
+ ready by setting bit 31 of C2PMSG_35 to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP KDB binary to memory */
+ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
+
+ /* Provide the PSP KDB to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v13_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP System Driver binary to memory */
+ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+
+ /* Provide the sys driver to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
+
+static int psp_v13_0_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+ /* allocate 4k Page of Local Frame Buffer memory for ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int psp_v13_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ ret = psp_v13_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v13_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v13_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Fail to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static uint32_t psp_v13_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v13_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
+}
+
+static const struct psp_funcs psp_v13_0_funcs = {
+ .init_microcode = psp_v13_0_init_microcode,
+ .bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
+ .bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
+ .bootloader_load_sos = psp_v13_0_bootloader_load_sos,
+ .ring_init = psp_v13_0_ring_init,
+ .ring_create = psp_v13_0_ring_create,
+ .ring_stop = psp_v13_0_ring_stop,
+ .ring_destroy = psp_v13_0_ring_destroy,
+ .ring_get_wptr = psp_v13_0_ring_get_wptr,
+ .ring_set_wptr = psp_v13_0_ring_set_wptr,
+};
+
+void psp_v13_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v13_0_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
new file mode 100644
index 000000000000..b2414a729ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __PSP_V13_0_H__
+#define __PSP_V13_0_H__
+
+#include "amdgpu_psp.h"
+
+void psp_v13_0_set_psp_funcs(struct psp_context *psp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index eb5dc6c5b46e..9f0dda040ec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -876,12 +876,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ad308d8c6d30..135727b59c41 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1160,12 +1160,10 @@ static int sdma_v3_0_sw_init(void *handle)
}
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c8c22c1d1e65..5715be6770ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -56,6 +56,7 @@
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
#include "amdgpu_ras.h"
+#include "sdma_v4_4.h"
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
@@ -69,6 +70,7 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -259,6 +261,24 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
+static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+};
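Each golden-settings entry above is an (instance, register, and_mask, or_value) tuple; applying one is, as I read soc15_program_register_sequence, a masked read-modify-write of the target register. A standalone sketch of that semantics, reusing the GB_ADDR_CONFIG mask/value pair from the table (the current register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* clear the bits selected by and_mask, then OR in the new value */
    static uint32_t apply_golden(uint32_t cur, uint32_t and_mask, uint32_t or_value)
    {
    	return (cur & ~and_mask) | (or_value & and_mask);
    }

    int main(void)
    {
    	uint32_t cur = 0x00000000;	/* pretend readback of mmSDMA0_GB_ADDR_CONFIG */

    	printf("0x%08x\n", apply_golden(cur, 0x0018773f, 0x00104002));
    	return 0;
    }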
+
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
@@ -482,6 +502,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_arct,
ARRAY_SIZE(golden_settings_sdma_arct));
break;
+ case CHIP_ALDEBARAN:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_aldebaran,
+ ARRAY_SIZE(golden_settings_sdma_aldebaran));
+ break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
@@ -564,7 +589,8 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
/* arcturus shares the same FW memory across
 all SDMA instances */
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
break;
}
@@ -621,6 +647,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "green_sardine";
break;
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
default:
BUG();
}
@@ -636,8 +665,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
goto out;
for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS) {
- /* Acturus will leverage the same FW memory
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
+ /* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
memcpy((void *)&adev->sdma.instance[i],
(void *)&adev->sdma.instance[0],
@@ -1825,6 +1855,8 @@ static int sdma_v4_0_early_init(void *handle)
adev->sdma.num_instances = 1;
else if (adev->asic_type == CHIP_ARCTURUS)
adev->sdma.num_instances = 8;
+ else if (adev->asic_type == CHIP_ALDEBARAN)
+ adev->sdma.num_instances = 5;
else
adev->sdma.num_instances = 2;
@@ -1895,6 +1927,33 @@ static int sdma_v4_0_sw_init(void *handle)
return r;
}
+ /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+ &adev->sdma.vm_hole_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+ &adev->sdma.doorbell_invalid_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+ &adev->sdma.pool_timeout_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+ &adev->sdma.srbm_write_irq);
+ if (r)
+ return r;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1909,7 +1968,7 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -1928,7 +1987,7 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -2149,6 +2208,72 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+ struct amdgpu_task_info task_info;
+ u64 addr;
+
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0 || instance >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+ return -EINVAL;
+ }
+
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_info(adev->dev,
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ return 0;
+}
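The address decode in sdma_v4_0_print_iv_entry stitches a 48-bit GPU virtual address back together from the IV payload: src_data[0] carries the low 32 bits of the 4 KiB page number and the low four bits of src_data[1] supply address bits 47:44. A standalone illustration with made-up payload values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t src_data[2] = { 0x00123456, 0x0000000a };	/* example IV payload */
    	uint64_t addr;

    	addr  = (uint64_t)src_data[0] << 12;		/* page number -> byte address */
    	addr |= ((uint64_t)src_data[1] & 0xf) << 44;	/* address bits 47:44 */

    	printf("fault address: 0x%016llx\n", (unsigned long long)addr);
    	return 0;
    }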
+
+static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev, "MC or SEM address in VM hole\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev,
+ "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev,
+ "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -2222,21 +2347,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- sdma_v4_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE);
- sdma_v4_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE);
- break;
- default:
- break;
- }
+ sdma_v4_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ sdma_v4_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
return 0;
}
@@ -2249,7 +2363,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
case CHIP_RAVEN:
case CHIP_RENOIR:
sdma_v4_1_update_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ state == AMD_PG_STATE_GATE);
break;
default:
break;
@@ -2465,7 +2579,21 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.process = amdgpu_sdma_process_ecc_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = {
+ .process = sdma_v4_0_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = {
+ .process = sdma_v4_0_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = {
+ .process = sdma_v4_0_process_pool_timeout_irq,
+};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = {
+ .process = sdma_v4_0_process_srbm_write_irq,
+};
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
@@ -2474,9 +2602,17 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
break;
+ case 5:
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ break;
case 8:
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.vm_hole_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ adev->sdma.doorbell_invalid_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.pool_timeout_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.srbm_write_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
break;
case 2:
default:
@@ -2487,6 +2623,10 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
+ adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs;
+ adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs;
+ adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs;
+ adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs;
}
/**
@@ -2655,6 +2795,9 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->sdma.funcs = &sdma_v4_0_ras_funcs;
break;
+ case CHIP_ALDEBARAN:
+ adev->sdma.funcs = &sdma_v4_4_ras_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
new file mode 100644
index 000000000000..6fcb95c89999
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "sdma/sdma_4_4_0_offset.h"
+#include "sdma/sdma_4_4_0_sh_mask.h"
+#include "soc15.h"
+#include "amdgpu_ras.h"
+
+#define SDMA1_REG_OFFSET 0x600
+#define SDMA2_REG_OFFSET 0x1cda0
+#define SDMA3_REG_OFFSET 0x1d1a0
+#define SDMA4_REG_OFFSET 0x1d5a0
+
+/* helper function that allows using only the sdma0 register offset
+ * to calculate the register offset for all the sdma instances */
+static uint32_t sdma_v4_4_get_reg_offset(struct amdgpu_device *adev,
+ uint32_t instance,
+ uint32_t offset)
+{
+ uint32_t sdma_base = adev->reg_offset[SDMA0_HWIP][0][0];
+
+ switch (instance) {
+ case 0:
+ return (sdma_base + offset);
+ case 1:
+ return (sdma_base + SDMA1_REG_OFFSET + offset);
+ case 2:
+ return (sdma_base + SDMA2_REG_OFFSET + offset);
+ case 3:
+ return (sdma_base + SDMA3_REG_OFFSET + offset);
+ case 4:
+ return (sdma_base + SDMA4_REG_OFFSET + offset);
+ default:
+ break;
+ }
+ return 0;
+}
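The helper above relies on the five Aldebaran SDMA instances sitting at fixed displacements from the SDMA0 register aperture. A table-driven, standalone equivalent of the same computation (the aperture base and register offset passed below are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t sdma_inst_disp[] = {
    	0x0,		/* SDMA0 */
    	0x600,		/* SDMA1_REG_OFFSET */
    	0x1cda0,	/* SDMA2_REG_OFFSET */
    	0x1d1a0,	/* SDMA3_REG_OFFSET */
    	0x1d5a0,	/* SDMA4_REG_OFFSET */
    };

    static uint32_t get_reg_offset(uint32_t sdma0_base, uint32_t instance, uint32_t offset)
    {
    	if (instance >= sizeof(sdma_inst_disp) / sizeof(sdma_inst_disp[0]))
    		return 0;
    	return sdma0_base + sdma_inst_disp[instance] + offset;
    }

    int main(void)
    {
    	printf("0x%x\n", get_reg_offset(0x1000, 2, 0x36));	/* placeholder values */
    	return 0;
    }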
+
+static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
+ { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+ 0, 0,
+ },
+ { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UCODE_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_RB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_IB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RD_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_SPLIT_DATA_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_SPLIT_DATA_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_RDRET_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
+};
+
+static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t instance,
+ uint32_t *sec_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt;
+
+ /* double bit (multiple bit) error detection is not supported */
+ for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
+ /* the SDMA_EDC_COUNTER register in each sdma instance
+ * shares the same SED shift_mask
+ */
+ sec_cnt = (value &
+ sdma_v4_4_ras_fields[i].sec_count_mask) >>
+ sdma_v4_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ dev_info(adev->dev, "Detected %s in SDMA%d, SED %d\n",
+ sdma_v4_4_ras_fields[i].name,
+ instance, sec_cnt);
+ *sec_count += sec_cnt;
+ }
+ }
+}
+
+static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t instance,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0;
+ uint32_t reg_value = 0;
+ uint32_t reg_offset = 0;
+
+ reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER);
+ reg_value = RREG32(reg_offset);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count);
+ /* err_data->ce_count should be initialized to 0
+ * before calling into this function */
+ err_data->ce_count += sec_count;
+ /* double bit error is not supported
+ * set ue count to 0 */
+ err_data->ue_count = 0;
+
+ return 0;
+}
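As the comment in the query path notes, err_data->ce_count is accumulated rather than assigned, so the caller is expected to zero the structure once and then walk every instance. A hedged sketch of that calling pattern (the real caller lives in the common RAS code and may differ in detail):

    /* illustrative only: zero once, then accumulate per instance */
    struct ras_err_data err_data = {0};
    int i;

    for (i = 0; i < adev->sdma.num_instances; i++)
    	sdma_v4_4_query_ras_error_count(adev, i, &err_data);

    dev_info(adev->dev, "SDMA correctable (SEC) errors: %lu\n",
    	 (unsigned long)err_data.ce_count);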
+
+static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_offset;
+
+ /* write 0 to EDC_COUNTER reg to clear sdma edc counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER);
+ WREG32(reg_offset, 0);
+ reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER2);
+ WREG32(reg_offset, 0);
+ }
+ }
+}
+
+const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = {
+ .ras_late_init = amdgpu_sdma_ras_late_init,
+ .ras_fini = amdgpu_sdma_ras_fini,
+ .query_ras_error_count = sdma_v4_4_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
new file mode 100644
index 000000000000..74a6e5b5e949
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SDMA_V4_4_H__
+#define __SDMA_V4_4_H__
+
+extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d345e324837d..920fc6d4a127 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1273,12 +1273,10 @@ static int sdma_v5_0_sw_init(void *handle)
: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 690a5090475a..93f826a7d3f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1283,10 +1283,9 @@ static int sdma_v5_2_sw_init(void *handle)
(adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -1595,9 +1594,9 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
sdma_v5_2_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v5_2_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 6b5cf7882a12..7cbc2bb03bc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -905,6 +907,114 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
+/* XXX: update when we support VCE */
+#if 0
+/* tahiti, pitcairn, verde */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
+ .codec_array = tahiti_video_codecs_encode_array,
+};
+#else
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+#endif
+/* oland and hainan don't support encode */
+static const struct amdgpu_video_codecs hainan_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* tahiti, pitcairn, verde, oland */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 41,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(tahiti_video_codecs_decode_array),
+ .codec_array = tahiti_video_codecs_decode_array,
+};
+
+/* hainan doesn't support decode */
+static const struct amdgpu_video_codecs hainan_video_codecs_decode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+static int si_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ if (encode)
+ *codecs = &tahiti_video_codecs_encode;
+ else
+ *codecs = &tahiti_video_codecs_decode;
+ return 0;
+ case CHIP_OLAND:
+ if (encode)
+ *codecs = &hainan_video_codecs_encode;
+ else
+ *codecs = &tahiti_video_codecs_decode;
+ return 0;
+ case CHIP_HAINAN:
+ if (encode)
+ *codecs = &hainan_video_codecs_encode;
+ else
+ *codecs = &hainan_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -1903,6 +2013,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_pcie_replay_count = &si_get_pcie_replay_count,
.supports_baco = &si_asic_supports_baco,
.pre_asic_init = &si_pre_asic_init,
+ .query_video_codecs = &si_query_video_codecs,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 488497ad5e0c..cb703e307238 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -507,10 +507,9 @@ static int si_dma_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
new file mode 100644
index 000000000000..079b094c48ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v13_0.h"
+#include "smuio/smuio_13_0_2_offset.h"
+#include "smuio/smuio_13_0_2_sh_mask.h"
+
+#define SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK 0x00000001L
+
+static u32 smuio_v13_0_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
+}
+
+static u32 smuio_v13_0_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
+}
+
+static void smuio_v13_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 def, data;
+
+ /* enabling/disabling ROM CG is not supported on APU */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ def = data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+ else
+ data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+ if (def != data)
+ WREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+ u32 data;
+
+ /* CGTT_ROM_CLK_CTRL0 is not available for APU */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+ if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+/**
+ * smuio_v13_0_get_die_id - query die id from FCH.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns die id
+ */
+static u32 smuio_v13_0_get_die_id(struct amdgpu_device *adev)
+{
+ u32 data, die_id;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
+
+ return die_id;
+}
+
+/**
+ * smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the host-gpu interface is xgmi, false otherwise.
+ */
+static bool smuio_v13_0_is_host_gpu_xgmi_supported(struct amdgpu_device *adev)
+{
+ u32 data;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, TOPOLOGY_ID);
+ /* data[4:0]
+ * bit 0 == 0 host-gpu interface is PCIE
+ * bit 0 == 1 host-gpu interface is Alternate Protocol
+ * for AMD, this is XGMI
+ */
+ data &= SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK;
+
+ return data ? true : false;
+}
+
+const struct amdgpu_smuio_funcs smuio_v13_0_funcs = {
+ .get_rom_index_offset = smuio_v13_0_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v13_0_get_rom_data_offset,
+ .get_die_id = smuio_v13_0_get_die_id,
+ .is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported,
+ .update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating,
+ .get_clock_gating_state = smuio_v13_0_get_clock_gating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h
new file mode 100644
index 000000000000..a3bfe3e4fb46
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V13_0_H__
+#define __SMUIO_V13_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v13_0_funcs;
+
+#endif /* __SMUIO_V13_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 1221aa6b40a9..5c5eb3aed1b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -71,9 +73,9 @@
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
+#include "smuio_v13_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
-#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
@@ -83,6 +85,234 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+/* Vega, Raven, Arcturus */
+static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs vega_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
+ .codec_array = vega_video_codecs_encode_array,
+};
+
+/* Vega */
+static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs vega_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
+ .codec_array = vega_video_codecs_decode_array,
+};
+
+/* Raven */
+static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs rv_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
+ .codec_array = rv_video_codecs_decode_array,
+};
+
+/* Renoir, Arcturus */
+static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs rn_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
+ .codec_array = rn_video_codecs_decode_array,
+};
+
+static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &vega_video_codecs_decode;
+ return 0;
+ case CHIP_RAVEN:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rv_video_codecs_decode;
+ return 0;
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rn_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
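A hedged sketch of how a consumer might walk the capability table exposed through the new query_video_codecs asic callback; the real consumer is the video-caps query in the KMS ioctl path, so treat this helper as illustrative only:

    /* illustrative only: log every supported decode profile */
    static void dump_decode_caps(struct amdgpu_device *adev)
    {
    	const struct amdgpu_video_codecs *codecs;
    	unsigned int i;

    	if (!adev->asic_funcs->query_video_codecs ||
    	    adev->asic_funcs->query_video_codecs(adev, false, &codecs))
    		return;

    	for (i = 0; i < codecs->codec_count; i++)
    		dev_info(adev->dev, "decode codec %u: up to %ux%u, level %u\n",
    			 codecs->codec_array[i].codec_type,
    			 codecs->codec_array[i].max_width,
    			 codecs->codec_array[i].max_height,
    			 codecs->codec_array[i].max_level);
    }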
+
/*
* Indirect registers accessor
*/
@@ -419,40 +649,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
}
-static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
-{
- u32 i;
- int ret = 0;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- dev_info(adev->dev, "GPU mode1 reset\n");
-
- /* disable BM */
- pci_clear_master(adev->pdev);
-
- amdgpu_device_cache_pci_state(adev->pdev);
-
- ret = psp_gpu_reset(adev);
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
-
- amdgpu_device_load_pci_state(adev->pdev);
-
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
-
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
- return ret;
-}
-
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -477,13 +673,21 @@ static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
bool baco_reset = false;
+ bool connected_to_cpu = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
+ connected_to_cpu = true;
+
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
- amdgpu_reset_method == AMD_RESET_METHOD_PCI)
- return amdgpu_reset_method;
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
+ /* If connected to cpu, driver only supports mode2 */
+ if (connected_to_cpu)
+ return AMD_RESET_METHOD_MODE2;
+ return amdgpu_reset_method;
+ }
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
@@ -509,6 +713,14 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
baco_reset = false;
break;
+ case CHIP_ALDEBARAN:
+ /*
+ * 1. connected to cpu: driver issues mode2 reset
+ * 2. discrete gpu: driver issues mode1 reset
+ */
+ if (connected_to_cpu)
+ return AMD_RESET_METHOD_MODE2;
+ break;
default:
break;
}
@@ -538,7 +750,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
return amdgpu_dpm_mode2_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
- return soc15_asic_mode1_reset(adev);
+ return amdgpu_device_mode1_reset(adev);
}
}
@@ -661,6 +873,9 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
arct_reg_base_init(adev);
break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ break;
default:
DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
break;
@@ -683,14 +898,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
soc15_reg_base_init(adev);
- if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
- adev->gmc.xgmi.supported = true;
-
if (adev->flags & AMD_IS_APU) {
adev->nbio.funcs = &nbio_v7_0_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
} else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS) {
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
adev->nbio.funcs = &nbio_v7_4_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
} else {
@@ -699,7 +912,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
adev->hdp.funcs = &hdp_v4_0_funcs;
- if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
adev->df.funcs = &df_v3_6_funcs;
else
adev->df.funcs = &df_v1_7_funcs;
@@ -707,6 +922,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS)
adev->smuio.funcs = &smuio_v11_0_funcs;
+ else if (adev->asic_type == CHIP_ALDEBARAN)
+ adev->smuio.funcs = &smuio_v13_0_funcs;
else
adev->smuio.funcs = &smuio_v9_0_funcs;
@@ -826,6 +1043,27 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
+ case CHIP_ALDEBARAN:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ }
+
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
default:
return -EINVAL;
}
@@ -994,6 +1232,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
+ .query_video_codecs = &soc15_query_video_codecs,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1015,6 +1254,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
+ .query_video_codecs = &soc15_query_video_codecs,
};
static int soc15_common_early_init(void *handle)
@@ -1244,6 +1484,21 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
break;
+ case CHIP_ALDEBARAN:
+ adev->asic_funcs = &vega20_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
+ adev->external_rev_id = adev->rev_id + 0x3c;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -1268,8 +1523,9 @@ static int soc15_common_late_init(void *handle)
if (adev->hdp.funcs->reset_ras_error_count)
adev->hdp.funcs->reset_ras_error_count(adev);
- if (adev->nbio.funcs->ras_late_init)
- r = adev->nbio.funcs->ras_late_init(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->ras_late_init)
+ r = adev->nbio.ras_funcs->ras_late_init(adev);
return r;
}
@@ -1290,7 +1546,9 @@ static int soc15_common_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_nbio_ras_fini(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->ras_fini)
+ adev->nbio.ras_funcs->ras_fini(adev);
adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1354,9 +1612,11 @@ static int soc15_common_hw_fini(void *handle)
if (adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
- if (adev->nbio.funcs->init_ras_controller_interrupt)
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_controller_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
- if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
@@ -1477,6 +1737,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -1498,15 +1759,18 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->hdp.funcs->get_clock_gating_state(adev, flags);
- /* AMD_CG_SUPPORT_DRM_MGCG */
- data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
- if (!(data & 0x01000000))
- *flags |= AMD_CG_SUPPORT_DRM_MGCG;
+ if (adev->asic_type != CHIP_ALDEBARAN) {
- /* AMD_CG_SUPPORT_DRM_LS */
- data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
- if (data & 0x1)
- *flags |= AMD_CG_SUPPORT_DRM_LS;
+ /* AMD_CG_SUPPORT_DRM_MGCG */
+ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
+ if (!(data & 0x01000000))
+ *flags |= AMD_CG_SUPPORT_DRM_MGCG;
+
+ /* AMD_CG_SUPPORT_DRM_LS */
+ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
+ if (data & 0x1)
+ *flags |= AMD_CG_SUPPORT_DRM_LS;
+ }
/* AMD_CG_SUPPORT_ROM_MGCG */
adev->smuio.funcs->get_clock_gating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 8f38f047265b..034cfdfc4dbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -49,6 +49,13 @@ struct soc15_reg_rlcg {
u32 reg;
};
+struct soc15_reg {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
@@ -88,6 +95,10 @@ struct soc15_ras_field_entry {
#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+#define SOC15_REG_FIELD_VAL(val, mask, shift) (((val) & mask) >> shift)
+
+#define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift)
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void soc15_set_virt_ops(struct amdgpu_device *adev);
@@ -100,6 +111,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
+int aldebaran_reg_base_init(struct amdgpu_device *adev);
void vega10_doorbell_index_init(struct amdgpu_device *adev);
void vega20_doorbell_index_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index a5c00ab8b021..14bd794bbea6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -77,13 +77,21 @@
})
#define WREG32_RLC(reg, value) \
+ do { \
+ if (adev->gfx.rlc.funcs->rlcg_wreg) \
+ adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
+ else \
+ WREG32(reg, value); \
+ } while (0)
+
+#define WREG32_RLC_EX(prefix, reg, value) \
do { \
if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
- uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
- uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1; \
- uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT; \
+ uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \
+ uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \
+ uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \
WREG32(r0, value); \
WREG32(r1, (reg | 0x80000000)); \
WREG32(spare_int, 0x1); \
@@ -101,13 +109,32 @@
} while (0)
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
+ WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+
+#define RREG32_RLC(reg) \
+ (adev->gfx.rlc.funcs->rlcg_rreg ? \
+ adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
+
+#define WREG32_RLC_NO_KIQ(reg, value) \
+ do { \
+ if (adev->gfx.rlc.funcs->rlcg_wreg) \
+ adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
+ else \
+ WREG32_NO_KIQ(reg, value); \
+ } while (0)
+
+#define RREG32_RLC_NO_KIQ(reg) \
+ (adev->gfx.rlc.funcs->rlcg_rreg ? \
+ adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+
+#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
if (amdgpu_sriov_fullaccess(adev)) { \
- uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
- uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
- uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
- uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; \
+ uint32_t r2 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \
+ uint32_t r3 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \
+ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \
+ uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \
if (target_reg == grbm_cntl) \
WREG32(r2, value); \
else if (target_reg == grbm_idx) \
@@ -118,18 +145,30 @@
} \
} while (0)
+#define RREG32_SOC15_RLC(ip, inst, reg) \
+ RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
+ WREG32_RLC(target_reg, value); \
+ } while (0)
+
+#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
+ do { \
uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
- WREG32_RLC(target_reg, value); \
+ WREG32_RLC_EX(prefix, target_reg, value); \
} while (0)
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
- WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
- (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
- & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+ (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
- WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+ WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+
+#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
+ RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
#endif
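A standalone sketch of the dispatch pattern behind the reworked WREG32_RLC above: when a per-ASIC rlcg_wreg callback is installed the write is routed through it (so SR-IOV guests go via the RLC interface), otherwise it falls back to a plain MMIO write. The types and names below are simplified stand-ins, not the real amdgpu structures:

#include <stdint.h>
#include <stdio.h>

struct demo_dev;

struct demo_rlc_funcs {
	void (*rlcg_wreg)(struct demo_dev *dev, uint32_t reg, uint32_t val, uint32_t flag);
};

struct demo_dev {
	const struct demo_rlc_funcs *rlc_funcs;
	uint32_t regs[16];	/* fake MMIO space */
};

static void demo_rlcg_wreg(struct demo_dev *dev, uint32_t reg, uint32_t val, uint32_t flag)
{
	/* a real implementation would proxy the write through the RLC */
	dev->regs[reg] = val;
	printf("RLC-proxied write: reg %u = 0x%x (flag %u)\n", reg, val, flag);
}

static void demo_wreg_rlc(struct demo_dev *dev, uint32_t reg, uint32_t val)
{
	if (dev->rlc_funcs && dev->rlc_funcs->rlcg_wreg) {
		dev->rlc_funcs->rlcg_wreg(dev, reg, val, 0);
	} else {
		dev->regs[reg] = val;
		printf("direct write: reg %u = 0x%x\n", reg, val);
	}
}

int main(void)
{
	static const struct demo_rlc_funcs funcs = { .rlcg_wreg = demo_rlcg_wreg };
	struct demo_dev dev = { .rlc_funcs = &funcs };

	demo_wreg_rlc(&dev, 3, 0xdeadbeef);	/* goes through the callback */
	dev.rlc_funcs = NULL;
	demo_wreg_rlc(&dev, 3, 0x12345678);	/* falls back to direct write */
	return 0;
}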
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index 5039375bb1d4..cf8ff064dc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -50,6 +50,7 @@ enum ta_securedisplay_status {
TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04, /* Fail to Write to I2C */
TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05, /*Fail Read DIO Scratch Register*/
TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06, /* Fail to Read CRC*/
+ TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR = 0x07, /* Failed to initialize I2C */
TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 96d7769609f4..20b44983ac94 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -22,6 +22,7 @@
*/
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "rsmu/rsmu_0_0_2_offset.h"
@@ -464,9 +465,10 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
umc_v6_1_enable_umc_index_mode(adev);
}
-const struct amdgpu_umc_funcs umc_v6_1_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
.err_cnt_init = umc_v6_1_err_cnt_init,
.ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index 0ce1d323cfdd..5dc36c730bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -45,7 +45,7 @@
/* umc ce count initial value */
#define UMC_V6_1_CE_CNT_INIT (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
extern const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
new file mode 100644
index 000000000000..3a8f787374c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
+}
+
+static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+	/* UMC 6_7 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_7_CE_CNT_INIT);
+
+	/* check for SRAM correctable error,
+	 * MCUMC_STATUS is a 64 bit register */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v6_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ regUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ regUMCCH0_0_EccErrCnt);
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_7_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_7_CE_CNT_INIT);
+}
+
+static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_7_reset_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+}
+
+static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+	/* TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers. Will add the protection */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+		umc_v6_7_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ umc_v6_7_reset_error_count(adev);
+}
+
+static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint32_t mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index;
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+	/* TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers. Will add the protection
+	 * when firmware interface is ready */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
+const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
+ .ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
+ .query_ras_error_count = umc_v6_7_query_ras_error_count,
+ .query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
new file mode 100644
index 000000000000..4eb85f247e96
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_7_H__
+#define __UMC_V6_7_H__
+
+/* EccErrCnt max value */
+#define UMC_V6_7_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_7_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V6_7_CE_CNT_INIT (UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD)
+
+#define UMC_V6_7_INST_DIST 0x40000
+
+extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+#endif
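Side note on the counter constants above: the hardware CE counter is pre-loaded with UMC_V6_7_CE_CNT_INIT so that it overflows (and can raise the CE interrupt) after UMC_V6_7_CE_INT_THRESHOLD further errors, and the query path reports the delta from that initial value, as done in umc_v6_7_query_correctable_error_count(). A tiny illustration, using a made-up threshold so the pre-load is visible (the real header uses 0xffff for both, making the initial value 0):

#include <stdint.h>
#include <stdio.h>

#define DEMO_CE_CNT_MAX		0xffff
#define DEMO_CE_INT_THRESHOLD	10
#define DEMO_CE_CNT_INIT	(DEMO_CE_CNT_MAX - DEMO_CE_INT_THRESHOLD)

int main(void)
{
	uint32_t hw_counter = DEMO_CE_CNT_INIT + 3;	/* 3 correctable errors logged */
	unsigned long new_errors = hw_counter - DEMO_CE_CNT_INIT;

	printf("new correctable errors: %lu\n", new_errors);	/* prints 3 */
	return 0;
}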
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index a064c097690c..89d20adfa001 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -22,6 +22,7 @@
*/
#include "umc_v8_7.h"
#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "rsmu/rsmu_0_0_2_offset.h"
@@ -323,9 +324,10 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
}
}
-const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
.err_cnt_init = umc_v8_7_err_cnt_init,
.ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v8_7_query_ras_error_count,
.query_ras_error_address = umc_v8_7_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
index d4d0468e3df5..37e6dc7c28e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -44,7 +44,7 @@
/* umc ce count initial value */
#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
extern const uint32_t
umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 10ecae257b18..284447d7a579 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -562,7 +562,7 @@ static int uvd_v3_1_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a70d2a0de316..a301518e4957 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -119,7 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index f3b0a927101b..a4d5bd21c83c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -117,7 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 760859880c1e..2bab9c77952f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -420,7 +420,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -434,7 +434,7 @@ static int uvd_v6_0_sw_init(void *handle)
sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 7cd67cb2ac5f..0cd98fcb1f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -454,7 +454,7 @@ static int uvd_v7_0_sw_init(void *handle)
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -475,7 +475,7 @@ static int uvd_v7_0_sw_init(void *handle)
}
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 0e2945baf0f1..c7d28c169be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -433,9 +433,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512,
- &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6d9108fa22e0..3b82fb289ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -443,7 +443,7 @@ static int vce_v3_0_sw_init(void *handle)
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 37fa163393fd..8e238dea7bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -477,7 +477,7 @@ static int vce_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 6117931fa8d7..51a773a37a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -129,7 +129,7 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -148,7 +148,7 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d63198c945bf..116b9643d5ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,7 +136,7 @@ static int vcn_v2_0_sw_init(void *handle)
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -167,7 +167,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b6e0f4ba6272..948813d7caa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -189,7 +189,7 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -203,7 +203,7 @@ static int vcn_v2_5_sw_init(void *handle)
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -1545,6 +1545,36 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_DEC,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = vcn_v2_5_dec_ring_get_rptr,
+ .get_wptr = vcn_v2_5_dec_ring_get_wptr,
+ .set_wptr = vcn_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
+ .emit_ib = vcn_v2_0_dec_ring_emit_ib,
+ .emit_fence = vcn_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
+ .test_ib = amdgpu_vcn_dec_ring_test_ib,
+ .insert_nop = vcn_v2_0_dec_ring_insert_nop,
+ .insert_start = vcn_v2_0_dec_ring_insert_start,
+ .insert_end = vcn_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
/**
* vcn_v2_5_enc_ring_get_rptr - get enc read pointer
*
@@ -1644,6 +1674,36 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = vcn_v2_5_enc_ring_get_rptr,
+ .get_wptr = vcn_v2_5_enc_ring_get_wptr,
+ .set_wptr = vcn_v2_5_enc_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1651,7 +1711,10 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
}
@@ -1665,7 +1728,10 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
@@ -1830,6 +1896,26 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.set_powergating_state = vcn_v2_5_set_powergating_state,
};
+static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
+ .name = "vcn_v2_6",
+ .early_init = vcn_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v2_5_sw_init,
+ .sw_fini = vcn_v2_5_sw_fini,
+ .hw_init = vcn_v2_5_hw_init,
+ .hw_fini = vcn_v2_5_hw_fini,
+ .suspend = vcn_v2_5_suspend,
+ .resume = vcn_v2_5_resume,
+ .is_idle = vcn_v2_5_is_idle,
+ .wait_for_idle = vcn_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v2_5_set_clockgating_state,
+ .set_powergating_state = vcn_v2_5_set_powergating_state,
+};
+
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCN,
@@ -1838,3 +1924,12 @@ const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
.rev = 0,
.funcs = &vcn_v2_5_ip_funcs,
};
+
+const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 2,
+ .minor = 6,
+ .rev = 0,
+ .funcs = &vcn_v2_6_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
index 8d9c0800b8e0..e72f799ed0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
@@ -25,5 +25,6 @@
#define __VCN_V2_5_H__
extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block;
+extern const struct amdgpu_ip_block_version vcn_v2_6_ip_block;
#endif /* __VCN_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index def583916294..3f15bf34123a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -50,6 +50,9 @@
#define VCN_INSTANCES_SIENNA_CICHLID 2
#define DEC_SW_RING_ENABLED FALSE
+#define RDECODE_MSG_CREATE 0x00000000
+#define RDECODE_MESSAGE_CREATE 0x00000001
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -171,6 +174,7 @@ static int vcn_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -198,6 +202,8 @@ static int vcn_v3_0_sw_init(void *handle)
if (r)
return r;
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
ring = &adev->vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
@@ -205,11 +211,10 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
- if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
- ring->no_scheduler = true;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT,
+ &adev->vcn.inst[i].sched_score);
if (r)
return r;
@@ -227,18 +232,18 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
- if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
- ring->no_scheduler = true;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT,
+ &adev->vcn.inst[i].sched_score);
if (r)
return r;
}
fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
- cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+ cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
+ cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
}
@@ -1074,7 +1079,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ /* Reset FW shared memory RBC WPTR/RPTR */
+ fw_shared->rb.rptr = 0;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+
+	/* resetting done, fw can check RB ring */
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1239,9 +1250,11 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
@@ -1662,6 +1675,10 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+	/* restore wptr/rptr with pointers saved in FW shared memory */
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1721,6 +1738,15 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		/* whenever RBC_RB_WPTR is updated, save the wptr in fw shared rb.wptr and scratch2 */
+ fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr));
+ }
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
@@ -1822,6 +1848,132 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+{
+ struct drm_gpu_scheduler **scheds;
+
+ /* The create msg must be in the first IB submitted */
+ if (atomic_read(&p->entity->fence_seq))
+ return -EINVAL;
+
+ scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+ [AMDGPU_RING_PRIO_DEFAULT].sched;
+ drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ return 0;
+}
+
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *map;
+ uint32_t *msg, num_buffers;
+ struct amdgpu_bo *bo;
+ uint64_t start, end;
+ unsigned int i;
+	void *ptr;
+ int r;
+
+ addr &= AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+ return r;
+ }
+
+ start = map->start * AMDGPU_GPU_PAGE_SIZE;
+ end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+ if (addr & 0x7) {
+ DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, &ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+ return r;
+ }
+
+ msg = ptr + addr - start;
+
+ /* Check length */
+ if (msg[1] > end - addr) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (msg[3] != RDECODE_MSG_CREATE)
+ goto out;
+
+ num_buffers = msg[2];
+ for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+ uint32_t offset, size, *create;
+
+ if (msg[0] != RDECODE_MESSAGE_CREATE)
+ continue;
+
+ offset = msg[1];
+ size = msg[2];
+
+ if (offset + size > end) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ create = ptr + addr + offset - start;
+
+		/* H264, HEVC and VP9 can run on any instance */
+ if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+ continue;
+
+ r = vcn_v3_0_limit_sched(p);
+ if (r)
+ goto out;
+ }
+
+out:
+ amdgpu_bo_kunmap(bo);
+ return r;
+}
+
+static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ uint32_t msg_lo = 0, msg_hi = 0;
+ unsigned i;
+ int r;
+
+ /* The first instance can decode anything */
+ if (!ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+ uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+
+ if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ msg_lo = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ msg_hi = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ val == 0) {
+ r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
@@ -1829,6 +1981,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
.set_wptr = vcn_v3_0_dec_ring_set_wptr,
+ .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
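An illustrative sketch of the command-stream scan wired up via patch_cs_in_place above: the decode ring IB is walked as (register, value) pairs, the data0/data1 writes carry the low/high halves of the message address, and the cmd write with value 0 triggers inspection of that message. The register numbers below are made up for the example; this is not the driver's actual parser:

#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_DATA0	0x10
#define DEMO_REG_DATA1	0x11
#define DEMO_REG_CMD	0x12

static int demo_scan_ib(const uint32_t *ib, unsigned int len_dw)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned int i;

	for (i = 0; i + 1 < len_dw; i += 2) {
		uint32_t reg = ib[i];
		uint32_t val = ib[i + 1];

		if (reg == DEMO_REG_DATA0)
			msg_lo = val;
		else if (reg == DEMO_REG_DATA1)
			msg_hi = val;
		else if (reg == DEMO_REG_CMD && val == 0)
			printf("decode message at 0x%llx\n",
			       ((unsigned long long)msg_hi << 32) | msg_lo);
	}
	return 0;
}

int main(void)
{
	uint32_t ib[] = {
		DEMO_REG_DATA0, 0xcafe0000,
		DEMO_REG_DATA1, 0x00000001,
		DEMO_REG_CMD,   0x0,
	};

	return demo_scan_ib(ib, sizeof(ib) / sizeof(ib[0]));
}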
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 88626d83e07b..ca8efa5c6978 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
@@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken;
int ret;
int i;
- u32 tmp;
/* disable irqs */
ret = vega10_ih_toggle_interrupts(adev, false);
@@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+
+ ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
@@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 5a3c867d5881..8a122b413bf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -35,6 +35,9 @@
#define MAX_REARM_RETRY 10
+#define mmIH_CHICKEN_ALDEBARAN 0x18d
+#define mmIH_CHICKEN_ALDEBARAN_BASE_IDX 0
+
static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
@@ -104,6 +107,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -220,10 +225,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega20_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
@@ -261,10 +264,10 @@ static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
{
uint32_t tmp;
- /* vega20 ih reroute will go through psp
- * this function is only used for arcturus
+	/* vega20 ih reroute will go through psp; this
+	 * function is used for newer asics starting from arcturus
*/
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->asic_type >= CHIP_ARCTURUS) {
/* Reroute to IH ring 1 for VMC */
WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
@@ -297,7 +300,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken;
int ret;
int i;
- u32 tmp;
/* disable irqs */
ret = vega20_ih_toggle_interrupts(adev, false);
@@ -316,6 +318,18 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
+	/* psp firmware won't program IH_CHICKEN for aldebaran;
+	 * the driver needs to program it according to the
+	 * MC_SPACE type in IH_RB_CNTL */
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
+ }
+
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
if (i == 1)
@@ -326,15 +340,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -380,11 +385,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+
+ ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
@@ -476,15 +487,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
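A small sketch of the read-modify-write field update used for the Aldebaran IH_CHICKEN programming above (setting MC_SPACE to GPA when the ring uses a bus address). The helper and the mask/shift values are simplified stand-ins for illustration; the kernel's REG_SET_FIELD derives them from the generated register headers:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for REG_SET_FIELD-style field insertion */
static uint32_t demo_set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

#define DEMO_MC_SPACE_GPA_ENABLE_MASK	0x00000010	/* assumed for the example */
#define DEMO_MC_SPACE_GPA_ENABLE_SHIFT	4

int main(void)
{
	uint32_t ih_chicken = 0x00000001;	/* pretend value read from IH_CHICKEN */

	ih_chicken = demo_set_field(ih_chicken, DEMO_MC_SPACE_GPA_ENABLE_MASK,
				    DEMO_MC_SPACE_GPA_ENABLE_SHIFT, 1);
	printf("IH_CHICKEN = 0x%08x\n", ih_chicken);	/* prints 0x00000011 */
	return 0;
}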
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index eafb76aebd00..ea338de5818a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -24,6 +24,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -79,6 +81,193 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+/* Topaz */
+static const struct amdgpu_video_codecs topaz_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* Tonga, CZ, ST, Fiji */
+static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs tonga_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
+ .codec_array = tonga_video_codecs_encode_array,
+};
+
+/* Polaris */
+static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs polaris_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
+ .codec_array = polaris_video_codecs_encode_array,
+};
+
+/* Topaz */
+static const struct amdgpu_video_codecs topaz_video_codecs_decode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* Tonga */
+static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs tonga_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
+ .codec_array = tonga_video_codecs_decode_array,
+};
+
+/* CZ, ST, Fiji, Polaris */
+static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs cz_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
+ .codec_array = cz_video_codecs_decode_array,
+};
+
+static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ if (encode)
+ *codecs = &topaz_video_codecs_encode;
+ else
+ *codecs = &topaz_video_codecs_decode;
+ return 0;
+ case CHIP_TONGA:
+ if (encode)
+ *codecs = &tonga_video_codecs_encode;
+ else
+ *codecs = &tonga_video_codecs_decode;
+ return 0;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ if (encode)
+ *codecs = &polaris_video_codecs_encode;
+ else
+ *codecs = &cz_video_codecs_decode;
+ return 0;
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (encode)
+ *codecs = &tonga_video_codecs_encode;
+ else
+ *codecs = &cz_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -1085,6 +1274,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_pcie_replay_count = &vi_get_pcie_replay_count,
.supports_baco = &vi_asic_supports_baco,
.pre_asic_init = &vi_pre_asic_init,
+ .query_video_codecs = &vi_query_video_codecs,
};
#define CZ_REV_BRISTOL(rev) \