Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 165
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 245
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 455
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 628
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 247
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 134
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 407
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 377
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 282
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 142
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 204
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 141
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6844
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.h | 349
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_smc.c | 279
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 146
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 299
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 434
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 463
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 238
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 366
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 749
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 198
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | 130
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 397
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.h | 1
136 files changed, 6292 insertions, 11677 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb787d27e..466da5954a68 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,11 +53,11 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
- ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
@@ -105,6 +105,7 @@ amdgpu-y += \
# add GFX block
amdgpu-y += \
amdgpu_gfx.o \
+ amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b0fc116296cb..8d0d7f3dd5fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,11 +75,14 @@
#include "amdgpu_sdma.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -161,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -360,123 +364,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
- */
-
- /* sDMA engines reserved from 0xe0 -oxef */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
-
- /* For vega10 sriov, the sdma doorbell must be fixed as follow
- * to keep the same setting with host driver, or it will
- * happen conflicts
- */
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
* IRQS.
*/
@@ -524,6 +411,8 @@ struct amdgpu_fpriv {
struct amdgpu_ctx_mgr ctx_mgr;
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
@@ -653,6 +542,13 @@ struct amdgpu_asic_funcs {
struct amdgpu_ring *ring);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
+ /* initialize doorbell layout for specific asic*/
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
+ /* PCIe bandwidth usage */
+ void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1);
+ /* do we need to reset the asic at init time (e.g., kexec) */
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
/*
@@ -745,7 +641,7 @@ struct amdgpu_nbio_funcs {
void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
u32 (*get_memsize)(struct amdgpu_device *adev);
void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
+ bool use_doorbell, int doorbell_index, int doorbell_size);
void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
bool enable);
void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
@@ -831,7 +727,6 @@ struct amdgpu_device {
bool need_dma32;
bool need_swiotlb;
bool accel_working;
- struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
@@ -976,6 +871,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -989,9 +887,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1023,6 +918,10 @@ struct amdgpu_device {
unsigned long last_mm_index;
bool in_gpu_reset;
struct mutex lock_reset;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +946,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1113,11 +1007,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1159,6 +1048,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
+#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
+#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1111,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
/*
* functions used by amdgpu_encoder.c
*/
@@ -1252,6 +1138,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc526419..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
};
struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
+ bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
bool temperature_change;
- bool graphics_device_types;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
};
struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+ struct amdgpu_dm_backlight_caps backlight_caps;
};
/* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
@@ -311,6 +299,65 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @handle: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
+
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ out:
return ret;
}
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps)
+{
+ if (!adev->atif) {
+ caps->caps_valid = false;
+ return;
+ }
+ caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+ caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
@@ -816,6 +886,5 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1580ec60b89f..fe1d7368c1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,22 +26,28 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
-
-const struct kgd2kfd_calls *kgd2kfd;
+#include <linux/dma-buf.h>
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
- ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret)
- kgd2kfd = NULL;
+ ret = kgd2kfd_init();
amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
- kgd2kfd = NULL;
ret = -ENOENT;
#endif
@@ -50,17 +56,13 @@ int amdgpu_amdkfd_init(void)
void amdgpu_amdkfd_fini(void)
{
- if (kgd2kfd)
- kgd2kfd->exit();
+ kgd2kfd_exit();
}
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
const struct kfd2kgd_calls *kfd2kgd;
- if (!kgd2kfd)
- return;
-
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
@@ -73,9 +75,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +89,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -124,9 +131,10 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- int i, n;
+ int i;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -134,7 +142,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GMC_HOLE_START),
- .drm_render_minor = adev->ddev->render->index
+ .drm_render_minor = adev->ddev->render->index,
+ .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
+
};
/* this is going to have a few of the MSBs set that we need to
@@ -144,7 +154,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
KGD_MAX_QUEUES);
/* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.ready)
+ if (adev->gfx.kiq.ring.sched.ready)
clear_bit(amdgpu_gfx_queue_to_bit(adev,
adev->gfx.kiq.ring.me - 1,
adev->gfx.kiq.ring.pipe,
@@ -164,78 +174,52 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
- return;
- }
-
- n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
-
- for (i = 0; i < n; i += 2) {
- /* On SOC15 the BIF is involved in routing
- * doorbells using the low 12 bits of the
- * address. Communicate the assignments to
- * KFD. KFD uses two doorbell pages per
- * process in case of 64-bit doorbells so we
- * can use each doorbell assignment twice.
- */
- if (adev->asic_type == CHIP_VEGA10) {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- } else {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- }
- }
- /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
- * SDMA, IH and VCN. So don't use them for the CP.
+ /* Since SOC15, BIF starts to statically use the
+ * lower 12 bits of doorbell addresses for routing
+ * based on settings in registers like
+ * SDMA0_DOORBELL_RANGE etc..
+ * In order to route a doorbell to CP engine, the lower
+ * 12 bits of its address has to be outside the range
+ * set for SDMA, VCN, and IH blocks.
*/
- gpu_resources.reserved_doorbell_mask = 0x1e0;
- gpu_resources.reserved_doorbell_val = 0x0e0;
+ if (adev->asic_type >= CHIP_VEGA10) {
+ gpu_resources.non_cp_doorbells_start =
+ adev->doorbell_index.first_non_cp;
+ gpu_resources.non_cp_doorbells_end =
+ adev->doorbell_index.last_non_cp;
+ }
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd_device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd_suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume(adev->kfd.dev);
return r;
}
@@ -244,8 +228,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd_pre_reset(adev->kfd.dev);
return r;
}
@@ -254,8 +238,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd_post_reset(adev->kfd.dev);
return r;
}
@@ -268,9 +252,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
amdgpu_device_gpu_recover(adev, NULL);
}
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -340,7 +324,7 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
@@ -351,8 +335,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +367,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
mem_info->mem_clk_max = 100;
}
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -392,7 +376,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
return 0;
}
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -405,7 +389,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return 100;
}
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
@@ -428,6 +412,62 @@ void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -510,7 +550,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -524,7 +564,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
@@ -557,4 +597,47 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
return NULL;
}
+
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+ const struct kfd2kgd_calls *f2g)
+{
+ return NULL;
+}
+
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ return false;
+}
+
+void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+void kgd2kfd_exit(void)
+{
+}
+
+void kgd2kfd_suspend(struct kfd_dev *kfd)
+{
+}
+
+int kgd2kfd_resume(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7196b4..0b31a1859023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,14 +27,13 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
-extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +76,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -134,16 +138,21 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +204,31 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+
+/* KGD2KFD callbacks */
+int kgd2kfd_init(void);
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+ const struct kfd2kgd_calls *f2g);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd);
+int kgd2kfd_resume(struct kfd_dev *kfd);
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 574c1181ae9a..3107b9575929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -122,7 +122,7 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
if (dma_fence_is_signaled(f))
return true;
- if (!kgd2kfd->schedule_evict_and_restore_process(fence->mm, f))
+ if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
return true;
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d9834a381..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -173,13 +174,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -200,28 +194,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f149914ad6c..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -128,13 +129,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -157,27 +151,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4e0929..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -46,38 +47,9 @@
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
#define V9_PIPE_PER_MEC (4)
#define V9_QUEUES_PER_PIPE_MEC (8)
@@ -167,13 +139,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -196,26 +161,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
@@ -785,15 +733,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- uint32_t req = (1 << vmid) |
- (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
- mutex_lock(&adev->srbm_mutex);
/* Use legacy mode tlb invalidation.
*
@@ -810,34 +749,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
* TODO 2: support range-based invalidation, requires kfg2kgd
* interface change
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
- req);
-
- while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- mutex_unlock(&adev->srbm_mutex);
-
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +788,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
if (adev->in_gpu_reset)
return -EIO;
- if (ring->ready)
+ if (ring->sched.ready)
return invalidate_tlbs_with_kiq(adev, pasid);
for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +928,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +939,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
* now, all processes share the same address space size, like
* on GFX8 and older.
*/
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059565f9..1921dec3df7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
- uint64_t max_userptr_mem_limit;
+ uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
- int64_t userptr_mem_used;
+ int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Current, limits are
- * System (kernel) memory - 3/8th System RAM
- * Userptr memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 3/4th System RAM
+ * TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
- (kfd_mem_limit.max_userptr_mem_limit >> 20));
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
- size_t acc_size;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
- kfd_mem_limit.max_system_mem_limit) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
- if ((kfd_mem_limit.system_mem_used + acc_size >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
- kfd_mem_limit.max_userptr_mem_limit)) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += acc_size;
- kfd_mem_limit.userptr_mem_used += size;
+ /* TTM GTT memory */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size + size;
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ /* Userptr */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size;
+ } else {
+ /* VRAM and SG */
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
+ ret = -ENOMEM;
+ } else {
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
-err_no_mem:
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -154,70 +168,61 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ } else {
kfd_mem_limit.system_mem_used -= acc_size;
- kfd_mem_limit.userptr_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "kfd TTM memory accounting unbalanced");
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
-/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
* @bo: [IN] Remove eviction fence(s) from this BO
- * @ef: [IN] If ef is specified, then this eviction fence is removed if it
+ * @ef: [IN] This eviction fence is removed if it
* is present in the shared list.
- * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
- * from BO's reservation object shared list.
- * @ef_count: [OUT] Number of fences in ef_list.
*
- * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
- * called to restore the eviction fences and to avoid memory leak. This is
- * useful for shared BOs.
* NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
*/
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence *ef,
- struct amdgpu_amdkfd_fence ***ef_list,
- unsigned int *ef_count)
+ struct amdgpu_amdkfd_fence *ef)
{
struct reservation_object *resv = bo->tbo.resv;
struct reservation_object_list *old, *new;
unsigned int i, j, k;
- if (!ef && !ef_list)
+ if (!ef)
return -EINVAL;
- if (ef_list) {
- *ef_list = NULL;
- *ef_count = 0;
- }
-
old = reservation_object_get_list(resv);
if (!old)
return 0;
@@ -236,8 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
- if ((ef && f->context == ef->base.context) ||
- (!ef && to_amdgpu_amdkfd_fence(f)))
+ if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
else
RCU_INIT_POINTER(new->shared[k++], f);
@@ -245,21 +249,6 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_max = old->shared_max;
new->shared_count = k;
- if (!ef) {
- unsigned int count = old->shared_count - j;
-
- /* Alloc memory for count number of eviction fence pointers.
- * Fill the ef_list array and ef_count
- */
- *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
- *ef_count = count;
-
- if (!*ef_list) {
- kfree(new);
- return -ENOMEM;
- }
- }
-
/* Install the new fence list, seqcount provides the barriers */
preempt_disable();
write_seqcount_begin(&resv->seq);
@@ -273,46 +262,13 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(new->shared[i],
reservation_object_held(resv));
- if (!ef)
- (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
- else
- dma_fence_put(f);
+ dma_fence_put(f);
}
kfree_rcu(old, rcu);
return 0;
}
-/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
- * reservation object.
- *
- * @bo: [IN] Add eviction fences to this BO
- * @ef_list: [IN] List of eviction fences to be added
- * @ef_count: [IN] Number of fences in ef_list.
- *
- * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
- * function.
- */
-static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence **ef_list,
- unsigned int ef_count)
-{
- int i;
-
- if (!ef_list || !ef_count)
- return;
-
- for (i = 0; i < ef_count; i++) {
- amdgpu_bo_fence(bo, &ef_list[i]->base, true);
- /* Re-adding the fence takes an additional reference. Drop that
- * reference.
- */
- dma_fence_put(&ef_list[i]->base);
- }
-
- kfree(ef_list);
-}
-
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -328,18 +284,8 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
goto validate_fail;
- if (wait) {
- struct amdgpu_amdkfd_fence **ef_list;
- unsigned int ef_count;
-
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
- &ef_count);
- if (ret)
- goto validate_fail;
-
- ttm_bo_wait(&bo->tbo, false, false);
- amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
- }
+ if (wait)
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
validate_fail:
return ret;
@@ -395,23 +341,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
return 0;
}
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
-{
- int ret = amdgpu_sync_fence(adev, sync, f, false);
-
- /* Sync objects can't handle multiple GPUs (contexts) updating
- * sync->last_vm_update. Fortunately we don't need it for
- * KFD's purposes, so we can just drop that fence.
- */
- if (sync->last_vm_update) {
- dma_fence_put(sync->last_vm_update);
- sync->last_vm_update = NULL;
- }
-
- return ret;
-}
-
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +351,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return sync_vm_fence(adev, sync, vm->last_update);
+ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
@@ -443,7 +372,6 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
{
int ret;
struct kfd_bo_va_list *bo_va_entry;
- struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
@@ -483,14 +411,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
*p_bo_va_entry = bo_va_entry;
/* Allocate new page tables if needed and validate
- * them. Clearing of new page tables and validate need to wait
- * on move fences. We don't want that to trigger the eviction
- * fence, so remove it temporarily.
+ * them.
*/
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
-
ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
if (ret) {
pr_err("Failed to allocate pts, err=%d\n", ret);
@@ -503,13 +425,9 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
goto err_alloc_pts;
}
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
return 0;
err_alloc_pts:
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
list_del(&bo_va_entry->bo_list);
err_vmadd:
@@ -536,7 +454,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -677,7 +595,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -741,7 +659,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -808,25 +726,12 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *pd = vm->root.base.bo;
- /* Remove eviction fence from PD (and thereby from PTs too as
- * they share the resv. object). Otherwise during PT update
- * job (see amdgpu_vm_bo_update_mapping), eviction fence would
- * get added to job->sync object and job execution would
- * trigger the eviction fence.
- */
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
- sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
}
@@ -851,7 +756,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +791,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
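A minimal usage sketch of the helper above, mirroring the doorbell allocation path added later in this patch (error unwinding trimmed; the surrounding variables come from that path):

	/* Sketch only: build a one-entry sg_table pointing at the doorbell
	 * page and hand it to TTM via an sg-type BO (see the
	 * ALLOC_MEM_FLAGS_DOORBELL branch below).
	 */
	struct sg_table *sg = create_doorbell_sg(*offset, size);

	if (!sg)
		return -ENOMEM;
	/* ... amdgpu_bo_create() with bp.type = ttm_bo_type_sg ... */
	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	/* On free, sg_free_table(sg) plus kfree(sg) undo create_doorbell_sg(). */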
+
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -901,6 +824,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
return 0;
}
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
@@ -963,7 +906,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
+ ret = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -1149,6 +1092,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1177,13 +1122,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1156,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
byte_align = (adev->family == AMDGPU_FAMILY_VI &&
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11) ?
+ adev->asic_type != CHIP_POLARIS11 &&
+ adev->asic_type != CHIP_POLARIS12) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1173,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1187,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1237,6 +1195,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1266,12 +1228,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1326,8 +1293,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
* attached
*/
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
mem->va + bo_size * (1 + mem->aql_queue));
@@ -1341,6 +1307,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it's one we created for a doorbell
+ * BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1405,7 +1379,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1545,8 +1520,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
unreserve_out:
unreserve_bo_and_vms(&ctx, false, false);
@@ -1607,7 +1581,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
}
amdgpu_amdkfd_remove_eviction_fence(
- bo, mem->process_info->eviction_fence, NULL, NULL);
+ bo, mem->process_info->eviction_fence);
list_del_init(&mem->validate_list.head);
if (size)
@@ -1642,6 +1616,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1664,7 +1692,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
evicted_bos = atomic_inc_return(&process_info->evicted_bos);
if (evicted_bos == 1) {
/* First eviction, stop the queues */
- r = kgd2kfd->quiesce_mm(mm);
+ r = kgd2kfd_quiesce_mm(mm);
if (r)
pr_err("Failed to quiesce KFD\n");
schedule_delayed_work(&process_info->restore_userptr_work,
@@ -1808,7 +1836,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -1819,16 +1847,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
amdgpu_sync_create(&sync);
- /* Avoid triggering eviction fences when unmapping invalid
- * userptr BOs (waits for all fences, doesn't use
- * FENCE_OWNER_VM)
- */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
- process_info->eviction_fence,
- NULL, NULL);
-
ret = process_validate_vms(process_info);
if (ret)
goto unreserve_out;
@@ -1889,10 +1907,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
ret = process_update_pds(process_info, &sync);
unreserve_out:
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_bo_fence(peer_vm->root.base.bo,
- &process_info->eviction_fence->base, true);
ttm_eu_backoff_reservation(&ticket, &resv_list);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
@@ -1956,7 +1970,7 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
evicted_bos)
goto unlock_out;
evicted_bos = 0;
- if (kgd2kfd->resume_mm(mm)) {
+ if (kgd2kfd_resume_mm(mm)) {
pr_err("%s: Failed to resume KFD\n", __func__);
/* No recovery from this failure. Probably the CP is
* hanging. No point trying again.
@@ -2027,7 +2041,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2058,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- /* Wait for PD/PTs validate to finish */
- /* FIXME: I think this isn't needed */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
- ttm_bo_wait(&bo->tbo, false, false);
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2077,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
-
+ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
list_for_each_entry(bo_va_entry, &mem->bo_va_list,
bo_list) {
ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2102,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2121,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Wait for validate to finish and attach new eviction fence */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
- ttm_bo_wait(&mem->bo->tbo, false, false);
+ /* Attach new eviction fence to all BOs */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list.head)
amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 69ad6ec0a4f3..bf04c12bd324 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -25,8 +25,8 @@
*/
#include <drm/drmP.h>
#include <drm/drm_edid.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0acc8dee2cb8..52a5e4fdc95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -213,6 +214,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
break;
default:
@@ -598,6 +600,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +723,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +967,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1083,6 +1091,15 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
fence = amdgpu_ctx_get_fence(ctx, entity,
deps[i].handle);
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+ struct dma_fence *old = fence;
+
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
if (IS_ERR(fence)) {
r = PTR_ERR(fence);
amdgpu_ctx_put(ctx);
@@ -1104,7 +1121,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
if (r)
return r;
@@ -1170,7 +1187,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
+ chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
@@ -1193,7 +1211,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
@@ -1260,8 +1278,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
return 0;
error_abort:
- dma_fence_put(&job->base.s_fence->finished);
- job->base.s_fence = NULL;
+ drm_sched_job_cleanup(&job->base);
amdgpu_mn_unlock(p->mn);
error_unlock:
@@ -1285,7 +1302,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser !\n");
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
@@ -1422,6 +1439,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
return PTR_ERR(fence);
+ if (!fence)
+ fence = dma_fence_get_stub();
+
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 000000000000..7e22be7ca68a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+ addr -= AMDGPU_VA_RESERVED_SIZE;
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+}
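A worked example of the address computed above (hedged: the exact AMDGPU_VA_RESERVED_SIZE value is an assumption, see amdgpu.h):

	/* Hypothetical numbers, assuming a 48-bit VM address space
	 * (max_pfn << AMDGPU_GPU_PAGE_SHIFT == 1ULL << 48) and
	 * AMDGPU_VA_RESERVED_SIZE == 1 MiB:
	 *   addr = 0x0001000000000000 - 0x100000 = 0x0000fffffff00000
	 *   sign-extending the 48-bit value yields the canonical CSA
	 *   address 0xfffffffffff00000.
	 */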
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size)
+{
+ int r;
+ void *ptr;
+
+ r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, bo,
+ NULL, &ptr);
+ if (!*bo)
+ return -ENOMEM;
+
+ memset(ptr, 0, size);
+ return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init. It maps
+ * the virtual address amdgpu_csa_vaddr() into this VM, and each GFX command
+ * submission should use this virtual address within the META_DATA init
+ * package to support SR-IOV GFX preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size)
+{
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct amdgpu_bo_list_entry pd;
+ struct ttm_validate_buffer csa_tv;
+ int r;
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&csa_tv.head);
+ csa_tv.bo = &bo->tbo;
+ csa_tv.num_shared = 1;
+
+ list_add(&csa_tv.head, &list);
+ amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ return r;
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ ttm_eu_backoff_reservation(&ticket, &list);
+ DRM_ERROR("failed to create bo_va for static CSA\n");
+ return -ENOMEM;
+ }
+
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+ size);
+ if (r) {
+ DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+
+ if (r) {
+ DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return 0;
+}
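A hedged caller-side sketch (the fpriv->csa_va destination and the AMDGPU_GMC_HOLE_MASK masking are assumptions about the SR-IOV open path, shown only to illustrate the expected arguments):

	/* Sketch: map the static CSA into a new VM during SR-IOV device open. */
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

	r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
				  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
	if (r)
		goto error_vm;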
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 000000000000..524b4437a021
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE (128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 95f4c4139fc6..7b526593eb77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -124,6 +124,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
unsigned num_rings;
+ unsigned num_rqs = 0;
switch (i) {
case AMDGPU_HW_IP_GFX:
@@ -166,12 +167,16 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
break;
}
- for (j = 0; j < num_rings; ++j)
- rqs[j] = &rings[j]->sched.sched_rq[priority];
+ for (j = 0; j < num_rings; ++j) {
+ if (!rings[j]->adev)
+ continue;
+
+ rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+ }
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rings, &ctx->guilty);
+ rqs, num_rqs, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
@@ -248,7 +253,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index dd9a4fb9ce39..4ae3ff9a1d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -158,9 +158,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
while (size) {
uint32_t value;
- if (*pos > adev->rmmio_size)
- goto end;
-
if (read) {
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..79fb302fb954 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -30,8 +30,8 @@
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+ /* For Vega, reserve and map two pages on the doorbell BAR since the SDMA
+ * paging queue doorbell uses the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page. So with the paging queue enabled,
+ * num_doorbells must be increased by one page (0x400 in dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
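For reference, the 0x400 figure above follows from the doorbell page size (a back-of-the-envelope check, not part of the patch):

	/* One doorbell BAR page is 4 KiB and each doorbell slot is a u32, so
	 * one page holds 4096 / 4 = 1024 = 0x400 doorbells. Adding 0x400 to
	 * num_doorbells therefore maps exactly one extra page for the SDMA
	 * paging queue doorbell on Vega.
	 */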
@@ -1631,7 +1645,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r) {
DRM_ERROR("sw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ goto init_failed;
}
adev->ip_blocks[i].status.sw = true;
@@ -1640,26 +1654,28 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
- return r;
+ goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
- return r;
+ goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
- return r;
+ goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_allocate_static_csa(adev);
+ r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
- return r;
+ goto init_failed;
}
}
}
@@ -1667,27 +1683,32 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
if (r)
- return r;
+ goto init_failed;
r = amdgpu_device_ip_hw_init_phase1(adev);
if (r)
- return r;
+ goto init_failed;
r = amdgpu_device_fw_loading(adev);
if (r)
- return r;
+ goto init_failed;
r = amdgpu_device_ip_hw_init_phase2(adev);
if (r)
- return r;
+ goto init_failed;
- amdgpu_xgmi_add_device(adev);
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
- if (amdgpu_sriov_vf(adev))
+init_failed:
+ if (amdgpu_sriov_vf(adev)) {
+ if (!r)
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+ }
- return 0;
+ return r;
}
/**
@@ -1848,6 +1869,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -1890,7 +1914,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
- amdgpu_free_static_csa(adev);
+ amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
}
@@ -2111,7 +2135,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+ DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2145,7 +2169,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
+ DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2337,6 +2361,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2435,6 +2472,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2455,9 +2494,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2476,6 +2512,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* doorbell bar mapping and doorbell index init*/
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -2511,6 +2550,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* detect if we are with an SRIOV vbios */
amdgpu_device_detect_sriov_bios(adev);
+ /* check if we need to reset the asic
+ * E.g., driver was not cleanly unloaded previously, etc.
+ */
+ if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
+ r = amdgpu_asic_reset(adev);
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
+ }
+ }
+
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -2575,6 +2625,8 @@ fence_driver_init:
}
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ if (amdgpu_virt_request_full_gpu(adev, false))
+ amdgpu_virt_release_full_gpu(adev, false);
goto failed;
}
@@ -2597,9 +2649,6 @@ fence_driver_init:
goto failed;
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2673,7 +2722,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
- drm_crtc_force_disable_all(adev->ddev);
+ drm_helper_force_disable_all(adev->ddev);
else
drm_atomic_helper_shutdown(adev->ddev);
}
@@ -2763,7 +2812,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -2871,7 +2920,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -3116,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/* No need to recover an evicted BO */
if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+ shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
continue;
@@ -3124,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
break;
if (fence) {
- r = dma_fence_wait_timeout(fence, false, tmo);
+ tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
fence = next;
- if (r <= 0)
+ if (tmo == 0) {
+ r = -ETIMEDOUT;
break;
+ } else if (tmo < 0) {
+ r = tmo;
+ break;
+ }
} else {
fence = next;
}
@@ -3139,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
- if (r <= 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed\n");
+ if (r < 0 || tmo <= 0) {
+ DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
@@ -3148,86 +3203,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3271,6 +3246,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
r = amdgpu_ib_ring_tests(adev);
error:
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
@@ -3295,40 +3271,46 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return false;
}
- if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
- !amdgpu_sriov_vf(adev))) {
- DRM_INFO("GPU recovery disabled.\n");
- return false;
- }
+ if (amdgpu_gpu_recovery == 0)
+ goto disabled;
- return true;
-}
+ if (amdgpu_sriov_vf(adev))
+ return true;
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
+ if (amdgpu_gpu_recovery == -1) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ break;
+ default:
+ goto disabled;
+ }
+ }
- dev_info(adev->dev, "GPU reset begin!\n");
+ return true;
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+disabled:
+ DRM_INFO("GPU recovery disabled.\n");
+ return false;
+}
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3337,21 +3319,153 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
- kthread_park(ring->sched.thread);
-
- if (job && job->base.sched != &ring->sched)
- continue;
-
- drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+ drm_sched_stop(&ring->sched);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+ if (job)
+ drm_sched_increase_karma(&job->base);
+
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all PSP resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
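The XGMI branch above fans the per-node resets out to a workqueue and then joins them; a stripped-down sketch of that pattern (using the xgmi_reset_work member and worker added earlier in this patch):

	/* Sketch only: fan out the resets so all hive nodes reset within the
	 * ~1 s window needed for XGMI link renegotiation, then join.
	 */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
		queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work);

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		flush_work(&tmp_adev->xgmi_reset_work);
		r = r ? r : tmp_adev->asic_reset_res;
	}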
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3359,38 +3473,194 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
- /* only need recovery sched of the given job's ring
- * or all rings (in the case @job is NULL)
- * after above amdgpu_reset accomplished
- */
- if ((!job || job->base.sched == &ring->sched) && !r)
- drm_sched_job_recovery(&ring->sched);
+ if (!adev->asic_reset_res)
+ drm_sched_resubmit_jobs(&ring->sched);
- kthread_unpark(ring->sched.thread);
+ drm_sched_start(&ring->sched, !adev->asic_reset_res);
}
if (!amdgpu_device_has_dc_support(adev)) {
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+}
- /*unlock kfd */
- amdgpu_amdkfd_post_reset(adev);
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
+ /*unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do a soft reset or full reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+ * In case of an XGMI hive, disallow concurrent resets from being triggered
+ * by different nodes. There is also no point, since the node already
+ * executing the reset will reset all the other nodes in the hive anyway.
+ */
+ hive = amdgpu_get_xgmi_hive(adev, 0);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->reset_lock))
+ return 0;
+
+ /* Start with adev pre asic reset first for soft reset check.*/
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /*TODO Should we stop ?*/
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In case we are in XGMI hive mode device reset is done for all the
+ * nodes in the hive to retrain all XGMI links and hence the reset
+ * sequence is executed in loop on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+ /*TODO Should we stop ?*/
+ if (r) {
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed.*/
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ /* Post ASIC reset for all devs. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->reset_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
+static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *pdev = adev->pdev;
+ enum pci_bus_speed cur_speed;
+ enum pcie_link_width cur_width;
+ u32 ret = 1;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (pdev) {
+ cur_speed = pcie_get_speed_cap(pdev);
+ cur_width = pcie_get_width_cap(pdev);
+ ret = pcie_bandwidth_available(adev->pdev, NULL,
+ NULL, &cur_width);
+ if (!ret)
+ cur_width = PCIE_LNK_WIDTH_RESRV;
+
+ if (cur_speed != PCI_SPEED_UNKNOWN) {
+ if (*speed == PCI_SPEED_UNKNOWN)
+ *speed = cur_speed;
+ else if (cur_speed < *speed)
+ *speed = cur_speed;
+ }
+
+ if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
+ if (*width == PCIE_LNK_WIDTH_UNKNOWN)
+ *width = cur_width;
+ else if (cur_width < *width)
+ *width = cur_width;
+ }
+ pdev = pci_upstream_bridge(pdev);
+ }
+}
+
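
The new amdgpu_device_get_min_pci_speed_width() walks from the GPU up through every upstream bridge and keeps the slowest speed and narrowest width it encounters, so the platform caps used later reflect the weakest link in the chain. A minimal standalone sketch of that selection logic, using made-up enums rather than the kernel's pci_bus_speed/pcie_link_width types:

#include <stdio.h>

enum speed { SPEED_UNKNOWN = -1, GEN1, GEN2, GEN3, GEN4 };
enum width { WIDTH_UNKNOWN = -1, X1 = 1, X4 = 4, X8 = 8, X16 = 16 };

struct dev {
	enum speed speed;
	enum width width;
	struct dev *upstream;
};

static void min_speed_width(const struct dev *d, enum speed *s, enum width *w)
{
	*s = SPEED_UNKNOWN;
	*w = WIDTH_UNKNOWN;

	/* Walk towards the root port, keeping the weakest link seen so far. */
	for (; d; d = d->upstream) {
		if (d->speed != SPEED_UNKNOWN &&
		    (*s == SPEED_UNKNOWN || d->speed < *s))
			*s = d->speed;
		if (d->width != WIDTH_UNKNOWN &&
		    (*w == WIDTH_UNKNOWN || d->width < *w))
			*w = d->width;
	}
}

int main(void)
{
	struct dev root   = { GEN3, X16, NULL };
	struct dev bridge = { GEN4, X8,  &root };
	struct dev gpu    = { GEN4, X16, &bridge };
	enum speed s;
	enum width w;

	min_speed_width(&gpu, &s, &w);
	printf("platform caps: gen%d x%d\n", (int)s + 1, (int)w);	/* gen3 x8 */
	return 0;
}
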
/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
@@ -3403,8 +3673,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
struct pci_dev *pdev;
- enum pci_bus_speed speed_cap;
- enum pcie_link_width link_width;
+ enum pci_bus_speed speed_cap, platform_speed_cap;
+ enum pcie_link_width platform_link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3421,6 +3691,12 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
return;
}
+ if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
+ return;
+
+ amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
+ &platform_link_width);
+
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
pdev = adev->pdev;
@@ -3446,22 +3722,20 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
}
/* platform caps */
- pdev = adev->ddev->pdev->bus->self;
- speed_cap = pcie_get_speed_cap(pdev);
- if (speed_cap == PCI_SPEED_UNKNOWN) {
+ if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
} else {
- if (speed_cap == PCIE_SPEED_16_0GT)
+ if (platform_speed_cap == PCIE_SPEED_16_0GT)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
- else if (speed_cap == PCIE_SPEED_8_0GT)
+ else if (platform_speed_cap == PCIE_SPEED_8_0GT)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
- else if (speed_cap == PCIE_SPEED_5_0GT)
+ else if (platform_speed_cap == PCIE_SPEED_5_0GT)
adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
else
@@ -3470,12 +3744,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
- pdev = adev->ddev->pdev->bus->self;
- link_width = pcie_get_width_cap(pdev);
- if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
- switch (link_width) {
+ switch (platform_link_width) {
case PCIE_LNK_X32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26de50f9..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ if (!adev->enable_virtual_display) {
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = amdgpu_bo_gpu_offset(new_abo);
+ if (!adev->enable_virtual_display)
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
@@ -242,9 +245,10 @@ pflip_cleanup:
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
+ if (!adev->enable_virtual_display)
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+ DRM_ERROR("failed to unpin new abo in error path\n");
+
unreserve:
amdgpu_bo_unreserve(new_abo);
@@ -631,6 +635,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
if (!adev->mode_info.max_bpc_property)
return -ENOMEM;
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev->ddev, 0,
+ "abm level", 0, 4);
+ if (!adev->mode_info.abm_level_property)
+ return -ENOMEM;
}
return 0;
@@ -857,7 +866,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+ /* With variable refresh rate displays the vpos can exceed
+ * the vtotal value. Clamp to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
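
The scanout-position correction above subtracts vtotal when the position is in the upper part of the vblank, but with variable refresh rate the vertical front porch can stretch and vpos may exceed vtotal; the patch clamps that case to 0. A small standalone sketch of the corrected math, with hypothetical timing values:

#include <stdio.h>

/* vbl_start/vtotal are hypothetical mode timings; the driver reads them
 * from the CRTC state. */
static int correct_vpos(int vpos, int vbl_start, int vtotal)
{
	/* In the "upper part" of the vblank the position is reported as a
	 * negative number of lines until the next scanout.  With VRR the
	 * front porch can stretch, so vpos may exceed vtotal; clamp that
	 * case to 0 instead of guessing. */
	if (vpos >= vbl_start)
		vpos = (vpos < vtotal) ? (vpos - vtotal) : 0;
	return vpos;
}

int main(void)
{
	printf("%d\n", correct_vpos(1120, 1090, 1125));	/* -5: five lines to scanout */
	printf("%d\n", correct_vpos(1300, 1090, 1125));	/* 0: clamped VRR stretch */
	return 0;
}
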
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..68959b923f89
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell BAR.
+ * For ASICs before vega10 the doorbell is 32-bit, so the
+ * index/offset is in dwords. For vega10 and later the doorbell
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t sdma_engine[8];
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t first_non_cp;
+ uint32_t last_non_cp;
+ uint32_t max_assignment;
+ /* Per engine SDMA doorbell size in dword */
+ uint32_t sdma_doorbell_range;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA:256~335*/
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+ * The doorbell assignment overlaps with VCN as the two are mutually exclusive.
+ * The VCE engine's doorbell is 32-bit and two VCE rings share one QWORD.
+ */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+
+ AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
+ AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
+
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64-bit doorbells: offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+ /*
+ * All compute related doorbells (kiq, hiq, diq, traditional compute queues
+ * and user queues) should be located in a contiguous range so that programming
+ * CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the whole range.
+ * Compute related doorbells are allocated from 0x00 to 0x8a.
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+ /* For vega10 SR-IOV, the SDMA doorbells must be fixed as follows
+ * to keep the same settings as the host driver, otherwise
+ * conflicts will occur.
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engine use 32 bits doorbell */
+ AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+ /* The doorbell assignment overlaps with VCN as the two are mutually exclusive.
+ * The VCE engine's doorbell is 32-bit and two VCE rings share one QWORD.
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_FIRST_NON_CP = AMDGPU_DOORBELL64_sDMA_ENGINE0,
+ AMDGPU_DOORBELL64_LAST_NON_CP = AMDGPU_DOORBELL64_VCE_RING6_7,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
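
The new header only defines index enums; per its own comment, post-vega10 doorbells are 64-bit and the indices count QWORDs inside the 2MB doorbell BAR. A standalone sketch of how a QWORD index such as AMDGPU_DOORBELL64_GFX_RING0 would translate to a byte offset, assuming exactly that layout:

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_BAR_SIZE	 (2u * 1024 * 1024)	/* 2MB BAR, per the header comment */
#define GFX_RING0_QWORD_INDEX	 0x8b			/* AMDGPU_DOORBELL64_GFX_RING0 */
#define SDMA_ENGINE0_QWORD_INDEX 0xF0			/* AMDGPU_DOORBELL64_sDMA_ENGINE0 */

int main(void)
{
	/* 64-bit doorbells: one index selects one QWORD, i.e. 8 bytes. */
	uint32_t gfx_off  = GFX_RING0_QWORD_INDEX * 8;
	uint32_t sdma_off = SDMA_ENGINE0_QWORD_INDEX * 8;

	printf("gfx ring0 doorbell at byte offset 0x%x\n", gfx_off);
	printf("sdma engine0 doorbell at byte offset 0x%x\n", sdma_off);
	printf("both fit in the BAR: %s\n",
	       (gfx_off + 8 <= DOORBELL_BAR_SIZE &&
		sdma_off + 8 <= DOORBELL_BAR_SIZE) ? "yes" : "no");
	return 0;
}
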
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1c4595562f8f..344967df3137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -184,61 +184,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
return vrefresh;
}
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u)
-{
- u32 b_c = 0;
- u32 i_c;
- u32 tmp;
-
- i_c = (i * r_c) / 100;
- tmp = i_c >> p_b;
-
- while (tmp) {
- b_c++;
- tmp >>= 1;
- }
-
- *u = (b_c + 1) / 2;
- *p = i_c / (1 << (2 * (*u)));
-}
-
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
- u32 k, a, ah, al;
- u32 t1;
-
- if ((fl == 0) || (fh == 0) || (fl > fh))
- return -EINVAL;
-
- k = (100 * fh) / fl;
- t1 = (t * (k - 100));
- a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
- a = (a + 5) / 10;
- ah = ((a * t) + 5000) / 10000;
- al = a - ah;
-
- *th = t - ah;
- *tl = t + al;
-
- return 0;
-}
-
-bool amdgpu_is_uvd_state(u32 class, u32 class2)
-{
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return true;
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return true;
- return false;
-}
-
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
switch (sensor) {
@@ -949,39 +894,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
return AMDGPU_PCIE_GEN1;
}
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes)
-{
- switch (asic_lanes) {
- case 0:
- default:
- return default_lanes;
- case 1:
- return 1;
- case 2:
- return 2;
- case 4:
- return 4;
- case 8:
- return 8;
- case 12:
- return 12;
- case 16:
- return 16;
- }
-}
-
-u8 amdgpu_encode_pci_lane_width(u32 lanes)
-{
- u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
-
- if (lanes > 16)
- return 0;
-
- return encoded_lanes[lanes];
-}
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index f972cd156795..e871e022c129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -364,6 +364,14 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
(adev)->powerplay.pp_handle))
+#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
+ ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
+ (adev)->powerplay.pp_handle, (buf)))
+
+#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
+ ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
+ (adev)->powerplay.pp_handle, (ppfeatures)))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -478,10 +486,6 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-bool amdgpu_is_uvd_state(u32 class, u32 class2);
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u);
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -497,11 +501,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
enum amdgpu_pcie_gen asic_gen,
enum amdgpu_pcie_gen default_gen);
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes);
-u8 amdgpu_encode_pci_lane_width(u32 lanes);
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 74b611e8a1b1..7419ea8a388b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -32,7 +32,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -71,9 +71,12 @@
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
* - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
+ * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
+ * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
+ * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 27
+#define KMS_DRIVER_MINOR 30
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -454,9 +457,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
/**
* DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in bytes.
+ * The default is 0 (depending on gfx).
*/
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
/**
@@ -864,6 +868,7 @@ static const struct pci_device_id pciidlist[] = {
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
@@ -1174,6 +1179,22 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#endif
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
+{
+ struct drm_file *file;
+
+ if (!filp)
+ return -EINVAL;
+
+ if (filp->f_op != &amdgpu_driver_kms_fops) {
+ return -EINVAL;
+ }
+
+ file = filp->private_data;
+ *fpriv = file->driver_priv;
+ return 0;
+}
+
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1187,7 +1208,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
@@ -1227,9 +1248,6 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1259,16 +1277,14 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
+ kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
/* let modprobe override vga console setting */
- return pci_register_driver(pdriver);
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -1280,7 +1296,7 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- pci_unregister_driver(pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf27654e..ee47c11e92ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+ "0x%016llx, cpu addr 0x%p\n", ring->name,
+ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28f8ad3..6d11e1721147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
*
* Map the dma_addresses into GART entries (all asics).
* Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff62887e4e3..afa2e2877d87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
struct amdgpu_gart {
struct amdgpu_bo *bo;
+ /* CPU kmapped address of gart table */
void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index ecbcefe49a98..f89f5734d985 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -37,6 +37,8 @@ struct amdgpu_gds {
struct amdgpu_gds_asic_info mem;
struct amdgpu_gds_asic_info gws;
struct amdgpu_gds_asic_info oa;
+ uint32_t gds_compute_max_wave_id;
+
/* At present, GDS, GWS and OA resources for gfx (graphics)
* is always pre-allocated and available for graphics operation.
* Such resource is shared between all gfx clients.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..d21dd2f369da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -54,10 +54,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
memset(&bp, 0, sizeof(bp));
*obj = NULL;
- /* At least align on page size */
- if (alignment < PAGE_SIZE) {
- alignment = PAGE_SIZE;
- }
bp.size = size;
bp.byte_align = alignment;
@@ -169,7 +165,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -244,9 +240,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- /* GDS allocations must be DW aligned */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_GDS)
- size = ALIGN(size, 4);
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
@@ -604,7 +597,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8657f7..97a60da62004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -249,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ ring->doorbell_index = adev->doorbell_index.kiq;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c11aead..f790e15bcd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
*/
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -37,59 +38,6 @@
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
-
-struct amdgpu_rlc_funcs {
- void (*enter_safe_mode)(struct amdgpu_device *adev);
- void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
- /* for power gating */
- struct amdgpu_bo *save_restore_obj;
- uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
- const u32 *reg_list;
- u32 reg_list_size;
- /* for clear state */
- struct amdgpu_bo *clear_state_obj;
- uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
- const struct cs_section_def *cs_data;
- u32 clear_state_size;
- /* for cp tables */
- struct amdgpu_bo *cp_table_obj;
- uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
- u32 cp_table_size;
-
- /* safe mode for updating CG/PG state */
- bool in_safe_mode;
- const struct amdgpu_rlc_funcs *funcs;
-
- /* for firmware data */
- u32 save_and_restore_offset;
- u32 clear_state_descriptor_offset;
- u32 avail_scratch_ram_locations;
- u32 reg_restore_list_size;
- u32 reg_list_format_start;
- u32 reg_list_format_separate_start;
- u32 starting_offsets_start;
- u32 reg_list_format_size_bytes;
- u32 reg_list_size_bytes;
- u32 reg_list_format_direct_reg_list_length;
- u32 save_restore_list_cntl_size_bytes;
- u32 save_restore_list_gpm_size_bytes;
- u32 save_restore_list_srm_size_bytes;
-
- u32 *register_list_format;
- u32 *register_restore;
- u8 *save_restore_list_cntl;
- u8 *save_restore_list_gpm;
- u8 *save_restore_list_srm;
-
- bool is_rlc_v2_1;
-};
-
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef446e46..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid);
+ uint32_t vmid, uint32_t flush_type);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
struct amdgpu_xgmi {
/* from psp */
- u64 device_id;
+ u64 node_id;
u64 hive_id;
/* fixed per family */
u64 node_segment_size;
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
@@ -151,7 +152,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
};
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
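
flush_gpu_tlb in amdgpu_gmc_funcs (and its wrapper macro) now takes an extra flush_type argument, which the GART bind/unbind paths pass as 0. A minimal sketch of the same function-pointer-plus-macro pattern, with made-up stand-in types, just to show how callers thread the new parameter through:

#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for struct amdgpu_device / amdgpu_gmc_funcs. */
struct dev;

struct gmc_funcs {
	/* flush the VM TLB via MMIO; flush_type selects the flush variant */
	void (*flush_gpu_tlb)(struct dev *dev, uint32_t vmid,
			      uint32_t flush_type);
};

struct dev {
	const struct gmc_funcs *gmc_funcs;
};

static void dummy_flush(struct dev *dev, uint32_t vmid, uint32_t flush_type)
{
	(void)dev;
	printf("flush TLB: vmid %u, flush_type %u\n", vmid, flush_type);
}

/* Same shape as the amdgpu_gmc_flush_gpu_tlb() wrapper macro. */
#define gmc_flush_gpu_tlb(dev, vmid, type) \
	((dev)->gmc_funcs->flush_gpu_tlb((dev), (vmid), (type)))

int main(void)
{
	const struct gmc_funcs funcs = { .flush_gpu_tlb = dummy_flush };
	struct dev dev = { .gmc_funcs = &funcs };

	/* GART bind/unbind now pass an explicit flush type (0 here). */
	gmc_flush_gpu_tlb(&dev, 0, 0);
	return 0;
}
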
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b725dfa..fe393a46f881 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
* IB
@@ -146,7 +147,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_ctx = 0;
}
- if (!ring->ready) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -202,12 +203,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_asic_flush_hdp(adev, ring);
}
+ if (need_ctx_switch)
+ status |= AMDGPU_HAVE_CTX_SWITCH;
+
skip_preamble = ring->current_ctx == fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
- if (need_ctx_switch)
- status |= AMDGPU_HAVE_CTX_SWITCH;
status |= job->preamble_status;
-
amdgpu_ring_emit_cntxcntl(ring, status);
}
@@ -221,9 +222,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
- need_ctx_switch);
- need_ctx_switch = false;
+ amdgpu_ring_emit_ib(ring, job, ib, status);
+ status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
if (ring->funcs->emit_tmz)
@@ -345,21 +345,18 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
* cost waiting for it coming back under RUNTIME only
*/
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ } else if (adev->gmc.xgmi.hive_id) {
+ tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
}
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
long tmo;
- if (!ring || !ring->ready)
- continue;
-
- /* skip IB tests for KIQ in general for the below reasons:
- * 1. We never submit IBs to the KIQ
- * 2. KIQ doesn't use the EOP interrupts,
- * we use some other CP interrupt.
+ /* KIQ rings don't have an IB test because we never submit IBs
+ * to them and they have no interrupt support.
*/
- if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ if (!ring->sched.ready || !ring->funcs->test_ib)
continue;
/* MM engine need more time */
@@ -374,20 +371,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
tmo = tmo_gfx;
r = amdgpu_ring_test_ib(ring, tmo);
- if (r) {
- ring->ready = false;
-
- if (ring == &adev->gfx.gfx_ring[0]) {
- /* oh, oh, that's really bad */
- DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
- adev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
- ret = r;
- }
+ if (!r) {
+ DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+ ring->name);
+ continue;
+ }
+
+ ring->sched.ready = false;
+ DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+ ring->name, r);
+
+ if (ring == &adev->gfx.gfx_ring[0]) {
+ /* oh, oh, that's really bad */
+ adev->accel_working = false;
+ return r;
+
+ } else {
+ ret = r;
}
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 8af67f649660..1c50be3ab8a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -52,6 +52,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
ih->use_bus_addr = use_bus_addr;
if (use_bus_addr) {
+ dma_addr_t dma_addr;
+
if (ih->ring)
return 0;
@@ -59,21 +61,26 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
* add them to the end of the ring allocation.
*/
ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
- &ih->rb_dma_addr, GFP_KERNEL);
+ &dma_addr, GFP_KERNEL);
if (ih->ring == NULL)
return -ENOMEM;
memset((void *)ih->ring, 0, ih->ring_size + 8);
- ih->wptr_offs = (ih->ring_size / 4) + 0;
- ih->rptr_offs = (ih->ring_size / 4) + 1;
+ ih->gpu_addr = dma_addr;
+ ih->wptr_addr = dma_addr + ih->ring_size;
+ ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
+ ih->rptr_addr = dma_addr + ih->ring_size + 4;
+ ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
} else {
- r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+ unsigned wptr_offs, rptr_offs;
+
+ r = amdgpu_device_wb_get(adev, &wptr_offs);
if (r)
return r;
- r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &rptr_offs);
if (r) {
- amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
@@ -82,10 +89,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
&ih->ring_obj, &ih->gpu_addr,
(void **)&ih->ring);
if (r) {
- amdgpu_device_wb_free(adev, ih->rptr_offs);
- amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, rptr_offs);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
+
+ ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
+ ih->wptr_cpu = &adev->wb.wb[wptr_offs];
+ ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
+ ih->rptr_cpu = &adev->wb.wb[rptr_offs];
}
return 0;
}
@@ -109,13 +121,13 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
* add them to the end of the ring allocation.
*/
dma_free_coherent(adev->dev, ih->ring_size + 8,
- (void *)ih->ring, ih->rb_dma_addr);
+ (void *)ih->ring, ih->gpu_addr);
ih->ring = NULL;
} else {
amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
(void **)&ih->ring);
- amdgpu_device_wb_free(adev, ih->wptr_offs);
- amdgpu_device_wb_free(adev, ih->rptr_offs);
+ amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4);
+ amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4);
}
}
@@ -128,16 +140,14 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 * Interrupt handler (VI), walk the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih))
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
u32 wptr;
if (!ih->enabled || adev->shutdown)
return IRQ_NONE;
- wptr = amdgpu_ih_get_wptr(adev);
+ wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
/* is somebody else already processing irqs? */
@@ -150,15 +160,15 @@ restart_ih:
rmb();
while (ih->rptr != wptr) {
- callback(adev, ih);
+ amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev);
+ amdgpu_ih_set_rptr(adev, ih);
atomic_set(&ih->lock, 0);
/* make sure wptr hasn't changed while processing */
- wptr = amdgpu_ih_get_wptr(adev);
+ wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
goto restart_ih;
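
For the bus-address case the rework keeps the write and read pointers in the 8 extra bytes appended to the ring allocation and derives wptr_addr/rptr_addr plus their CPU aliases from the single DMA address. A standalone sketch of that layout arithmetic, using plain calloc as a stand-in for dma_alloc_coherent:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int ring_size = 4096;	/* bytes; a power of two in practice */

	/* calloc stands in for dma_alloc_coherent(dev, ring_size + 8, ...). */
	uint32_t *ring = calloc(1, ring_size + 8);
	uintptr_t dma_addr;

	if (!ring)
		return 1;
	dma_addr = (uintptr_t)ring;	/* stand-in for the returned bus address */

	/* wptr and rptr live in the 8 bytes appended after the ring itself. */
	volatile uint32_t *wptr_cpu = &ring[ring_size / 4];
	volatile uint32_t *rptr_cpu = &ring[ring_size / 4 + 1];
	uintptr_t wptr_addr = dma_addr + ring_size;
	uintptr_t rptr_addr = dma_addr + ring_size + 4;

	printf("wptr at ring + %zu, rptr at ring + %zu\n",
	       (size_t)(wptr_addr - dma_addr), (size_t)(rptr_addr - dma_addr));
	(void)wptr_cpu;
	(void)rptr_cpu;

	free(ring);
	return 0;
}
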
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..113a1ba13d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -31,42 +31,44 @@ struct amdgpu_iv_entry;
* R6xx+ IH ring
*/
struct amdgpu_ih_ring {
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr;
unsigned ring_size;
- uint64_t gpu_addr;
uint32_t ptr_mask;
- atomic_t lock;
- bool enabled;
- unsigned wptr_offs;
- unsigned rptr_offs;
u32 doorbell_index;
bool use_doorbell;
bool use_bus_addr;
- dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+
+ struct amdgpu_bo *ring_obj;
+ volatile uint32_t *ring;
+ uint64_t gpu_addr;
+
+ uint64_t wptr_addr;
+ volatile uint32_t *wptr_cpu;
+
+ uint64_t rptr_addr;
+ volatile uint32_t *rptr_cpu;
+
+ bool enabled;
+ unsigned rptr;
+ atomic_t lock;
};
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
- u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
- void (*decode_iv)(struct amdgpu_device *adev,
+ u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+ void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry);
- void (*set_rptr)(struct amdgpu_device *adev);
+ void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
};
-#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
-#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
-#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
+#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih))
+#define amdgpu_ih_decode_iv(adev, iv) \
+ (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv))
+#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih))
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
unsigned ring_size, bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih));
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
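
After the rework, amdgpu_ih_process() drains whichever ring it is handed: read the write pointer, dispatch entries until the read pointer catches up, publish the read pointer, then re-read the write pointer and restart if more entries arrived meanwhile. A simplified userspace sketch of that loop shape (no hardware access, hypothetical data):

#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 16			/* hypothetical tiny ring */

struct ih_ring {
	uint32_t ring[RING_ENTRIES];
	uint32_t rptr;
	uint32_t wptr;			/* normally read back from the chip */
	uint32_t ptr_mask;
};

static void dispatch_one(struct ih_ring *ih)
{
	printf("dispatch entry %u: payload %u\n", ih->rptr, ih->ring[ih->rptr]);
	ih->rptr = (ih->rptr + 1) & ih->ptr_mask;
}

static void process(struct ih_ring *ih)
{
	uint32_t wptr = ih->wptr;	/* amdgpu_ih_get_wptr() equivalent */

restart:
	while (ih->rptr != wptr)
		dispatch_one(ih);

	/* amdgpu_ih_set_rptr() would publish ih->rptr to the chip here. */

	wptr = ih->wptr;		/* re-check: did more entries arrive? */
	if (wptr != ih->rptr)
		goto restart;
}

int main(void)
{
	struct ih_ring ih = { .ptr_mask = RING_ENTRIES - 1, .wptr = 3 };

	ih.ring[0] = 100;
	ih.ring[1] = 101;
	ih.ring[2] = 102;
	process(&ih);
	return 0;
}
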
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6219a7..af4c3b1af322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- reset_work);
-
- if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
- amdgpu_device_gpu_recover(adev, NULL);
-}
-
-/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
* @adev: amdgpu device pointer
@@ -148,34 +131,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_callback - callback from the IH ring
- *
- * @adev: amdgpu device pointer
- * @ih: amdgpu ih ring
- *
- * Callback from IH ring processing to handle the entry at the current position
- * and advance the read pointer.
- */
-static void amdgpu_irq_callback(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih)
-{
- u32 ring_index = ih->rptr >> 2;
- struct amdgpu_iv_entry entry;
-
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
- entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
-
- amdgpu_irq_dispatch(adev, &entry);
-}
-
-/**
* amdgpu_irq_handler - IRQ handler
*
* @irq: IRQ number (unused)
@@ -192,13 +147,43 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
struct amdgpu_device *adev = dev->dev_private;
irqreturn_t ret;
- ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
+ ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
}
/**
+ * amdgpu_irq_handle_ih1 - kick off processing for IH1
+ *
+ * @work: work structure in struct amdgpu_irq
+ *
+ * Kick off processing for IH ring 1.
+ */
+static void amdgpu_irq_handle_ih1(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ irq.ih1_work);
+
+ amdgpu_ih_process(adev, &adev->irq.ih1);
+}
+
+/**
+ * amdgpu_irq_handle_ih2 - kick off processing for IH2
+ *
+ * @work: work structure in struct amdgpu_irq
+ *
+ * Kick off processing for IH ring 2.
+ */
+static void amdgpu_irq_handle_ih2(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ irq.ih2_work);
+
+ amdgpu_ih_process(adev, &adev->irq.ih2);
+}
+
+/**
* amdgpu_msi_ok - check whether MSI functionality is enabled
*
* @adev: amdgpu device pointer (unused)
@@ -262,7 +247,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
amdgpu_hotplug_work_func);
}
- INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
+ INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
+ INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
@@ -270,7 +256,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
return r;
}
adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +284,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
pci_disable_msi(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
@@ -387,44 +371,50 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih)
{
- unsigned client_id = entry->client_id;
- unsigned src_id = entry->src_id;
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+ unsigned client_id, src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
- trace_amdgpu_iv(entry);
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+
+ client_id = entry.client_id;
+ src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
- r = src->funcs->process(adev, src, entry);
- if (r)
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
+ r = src->funcs->process(adev, src, &entry);
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index f6ce171cb8aa..c718e94a55c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -87,9 +87,11 @@ struct amdgpu_irq {
/* status, etc. */
bool msi_enabled; /* msi enabled */
- /* interrupt ring */
- struct amdgpu_ih_ring ih;
- const struct amdgpu_ih_funcs *ih_funcs;
+ /* interrupt rings */
+ struct amdgpu_ih_ring ih, ih1, ih2;
+ const struct amdgpu_ih_funcs *ih_funcs;
+ struct work_struct ih1_work, ih2_work;
+ struct amdgpu_irq_src self_irq;
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
@@ -106,7 +108,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
+ struct amdgpu_ih_ring *ih);
int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733bf0d9..0a17fb1af204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
@@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
return;
}
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
@@ -112,6 +118,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ drm_sched_job_cleanup(s_job);
+
amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 57cfe78a262b..e1b46a6703de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
struct amdgpu_fence;
struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8f3d44e5e787..e860412043bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -207,11 +207,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(&dev->pdev->dev,
"Error during ACPI methods call\n");
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -336,7 +337,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].ready)
+ if (adev->gfx.gfx_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -344,7 +345,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].ready)
+ if (adev->gfx.compute_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -352,7 +353,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.ready)
+ if (adev->sdma.instance[i].ring.sched.ready)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -363,7 +364,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.ready)
+ if (adev->uvd.inst[i].ring.sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -372,7 +373,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].ready)
+ if (adev->vce.ring[i].sched.ready)
++num_rings;
ib_start_alignment = 4;
ib_size_alignment = 1;
@@ -384,7 +385,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -392,7 +393,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_dec.ready)
+ if (adev->vcn.ring_dec.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -400,14 +401,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- if (adev->vcn.ring_enc[i].ready)
+ if (adev->vcn.ring_enc[i].sched.ready)
++num_rings;
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_jpeg.ready)
+ if (adev->vcn.ring_jpeg.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -978,7 +979,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
if (r)
goto error_vm;
}
@@ -1048,8 +1052,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
- amdgpu_vm_fini(adev, &fpriv->vm);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index e55508b39496..3e6823fdd939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
- amdgpu_mn_invalidate_node(node, start, end);
+ amdgpu_mn_invalidate_node(node, range->start, end);
}
return 0;
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, mm);
+ range->start,
+ end))
+ amdgpu_amdkfd_evict_userptr(mem, range->mm);
}
}
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
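For reference, a minimal sketch of a notifier callback written against the consolidated parameter structure used above; only the range->mm/start/end/blockable fields are taken from the hunks, the handler name is hypothetical.

static int example_invalidate_range_start(struct mmu_notifier *mn,
					  const struct mmu_notifier_range *range)
{
	/* notification is exclusive, interval lookups are inclusive */
	unsigned long last = range->end - 1;

	/* callers that cannot sleep pass blockable == false */
	if (!range->blockable)
		return -EAGAIN;

	/* range->mm and range->start replace the old mm/start arguments */
	example_handle_invalidation(range->mm, range->start, last);
	return 0;
}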
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d1b4d9b6aae0..889e443eeee7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
@@ -57,7 +57,6 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
@@ -295,13 +294,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- /* it is used to enter or exit into free sync mode */
- int (*notify_freesync)(struct drm_device *dev, void *data,
- struct drm_file *filp);
- /* it is used to allow enablement of freesync mode */
- int (*set_freesync_property)(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
};
@@ -325,7 +317,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
- struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+ struct drm_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -341,6 +333,8 @@ struct amdgpu_mode_info {
struct drm_property *dither_property;
/* maximum number of bits per channel for monitor color */
struct drm_property *max_bpc_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -412,6 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
@@ -436,11 +431,6 @@ struct amdgpu_crtc {
struct drm_pending_vblank_event *event;
};
-struct amdgpu_plane {
- struct drm_plane base;
- enum drm_plane_type plane_type;
-};
-
struct amdgpu_encoder_atom_dig {
bool linkb;
/* atom dig */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 904014dc5915..ec9e45004bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -426,12 +426,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
size_t acc_size;
int r;
- page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA))
+ /* Note that GDS/GWS/OA allocate 1 page per byte/resource. */
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+ /* GWS and OA don't need any alignment. */
+ page_align = bp->byte_align;
size <<= PAGE_SHIFT;
- else
+ } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
+ /* Both size and alignment must be a multiple of 4. */
+ page_align = ALIGN(bp->byte_align, 4);
+ size = ALIGN(size, 4) << PAGE_SHIFT;
+ } else {
+ /* Memory should be aligned at least to a page size. */
+ page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
+ }
if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;
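A quick worked illustration of the sizing rule above (editorial sketch with made-up numbers): TTM accounts for GDS/GWS/OA in pages while the request is expressed in bytes/resources, so the shift makes the page count equal the byte/resource count.

	/* hypothetical request: 30 bytes of GDS, byte_align = 1 */
	size = ALIGN(30, 4) << PAGE_SHIFT;   /* 32 << PAGE_SHIFT -> TTM sees 32 "pages", one per GDS byte */
	page_align = ALIGN(1, 4);            /* GDS alignment stays in 4-byte units rather than pages */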
@@ -608,53 +616,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -959,7 +920,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
@@ -1324,6 +1285,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
+ * amdgpu_bo_sync_wait - Wait for BO reservation fences
+ *
+ * @bo: buffer object
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_sync sync;
+ int r;
+
+ amdgpu_sync_create(&sync);
+ amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+ r = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+
+ return r;
+}
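A hedged usage sketch for the new helper (the surrounding context is hypothetical): it saves callers from open-coding an amdgpu_sync object when they only need to wait for the fences already attached to a BO's reservation object.

	/* e.g. wait for outstanding work on the BO before touching it from the CPU */
	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
	if (unlikely(r))
		return r;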
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..220a6a7b1bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -266,12 +266,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678de8c1..a7adb7b6bd98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -624,11 +626,71 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+ * DOC: ppfeatures
+ *
+ * The amdgpu driver provides a sysfs API for adjusting which powerplay
+ * features are enabled. The file ppfeatures is used for this. It is
+ * only available on Vega10 and later dGPUs.
+ *
+ * Reading back the file will show you the following:
+ * - the current ppfeature mask
+ * - a list of all supported powerplay features with their names,
+ *   bitmasks and enablement status ('Y'/'N' means enabled/disabled).
+ *
+ * To manually enable or disable a specific feature, set or clear
+ * the corresponding bit in the original ppfeature mask and write the
+ * new mask back to the file.
+ */
+static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint64_t featuremask;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &featuremask);
+ if (ret)
+ return -EINVAL;
+
+ pr_debug("featuremask = 0x%llx\n", featuremask);
+
+ if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+ ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
+ if (ret)
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ return amdgpu_dpm_get_ppfeature_status(adev, buf);
+
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
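A minimal user-space sketch of the interface documented above (editorial illustration only; the sysfs path and the mask value are assumptions, and the exact read-back format is defined by the powerplay backend):

/* illustrative only: write back a ppfeature mask with one bit cleared */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/ppfeatures"; /* assumed node */
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	/* the store handler parses the mask with kstrtou64(), so 0x-prefixed input is fine */
	fprintf(f, "0x%llx\n", 0xffffffffffffbfffULL); /* example mask, bit 14 cleared */
	fclose(f);
	return 0;
}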
+
+/**
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
+ * pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
- * and pp_dpm_pcie are used for this.
+ * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
+ * this.
+ *
+ * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
+ * Vega10 and later ASICs.
+ * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
*
* Reading back the files will show you the available power levels within
* the power state and the clock information for those levels.
@@ -638,6 +700,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
* Secondly, enter a new value for each level by writing a string of the form
* "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
* E.g., "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
+ *
+ * NOTE: changing the max dcefclk DPM level is not currently supported.
*/
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
@@ -748,6 +812,114 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return count;
}
+static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ uint32_t mask = 0;
+
+ ret = amdgpu_read_mask(buf, count, &mask);
+ if (ret)
+ return ret;
+
+ if (adev->powerplay.pp_funcs->force_clock_level)
+ ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
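And a matching user-space sketch for the pp_dpm_* files described in the DOC comment further up (the path and level numbers are illustrative):

/* illustrative only: enable socclk DPM levels 2, 3 and 4 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_socclk", "w"); /* assumed node */

	if (!f)
		return 1;
	fputs("2 3 4\n", f); /* amdgpu_read_mask() turns the space-separated list into a bitmask */
	fclose(f);
	return 0;
}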
+
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -988,6 +1160,31 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
+/**
+ * DOC: pcie_bw
+ *
+ * The amdgpu driver provides a sysfs API for estimating how much data
+ * has been received and sent by the GPU in the last second through PCIe.
+ * The file pcie_bw is used for this.
+ * The perf counters count the number of received and sent messages and return
+ * those values, as well as the maximum payload size of a PCIe packet (mps).
+ * Note that it is not possible to easily and quickly obtain the size of each
+ * packet transmitted, so we output the max payload size (mps) to allow for
+ * a quick estimation of the PCIe bandwidth usage.
+ */
+static ssize_t amdgpu_get_pcie_bw(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint64_t count0, count1;
+
+ amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
+ return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
+ count0, count1, pcie_get_mps(adev->pdev));
+}
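A small user-space sketch of the estimate the DOC block describes (editorial; the node path is assumed and the result is only an upper bound, since mps caps the payload of each counted message):

/* illustrative only: upper-bound the last second's PCIe traffic */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t count0, count1;
	int mps;
	FILE *f = fopen("/sys/class/drm/card0/device/pcie_bw", "r"); /* assumed node */

	if (!f)
		return 1;
	if (fscanf(f, "%" SCNu64 " %" SCNu64 " %d", &count0, &count1, &mps) != 3) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("<= %" PRIu64 " bytes/s\n", (count0 + count1) * (uint64_t)mps);
	return 0;
}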
+
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -1006,6 +1203,15 @@ static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_mclk,
amdgpu_set_pp_dpm_mclk);
+static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_socclk,
+ amdgpu_set_pp_dpm_socclk);
+static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_fclk,
+ amdgpu_set_pp_dpm_fclk);
+static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_dcefclk,
+ amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_pcie,
amdgpu_set_pp_dpm_pcie);
@@ -1023,6 +1229,10 @@ static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
amdgpu_get_busy_percent, NULL);
+static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
+static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
+ amdgpu_get_ppfeature_status,
+ amdgpu_set_ppfeature_status);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1514,6 +1724,75 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return count;
}
+static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
+ uint32_t sclk;
+ int r, size = sizeof(sclk);
+
+ /* Can't get the clock frequency when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the sclk */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+}
+
+static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "sclk\n");
+}
+
+static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
+ uint32_t mclk;
+ int r, size = sizeof(mclk);
+
+ /* Can't get the clock frequency when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the mclk */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+}
+
+static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "mclk\n");
+}
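As a sanity check on the conversion above (editorial note): the * 10 * 1000 factor implies the powerplay sensor reports the clock in 10 kHz units, so a reading of 130000 is exposed as 130000 * 10 * 1000 = 1,300,000,000 Hz, i.e. 1.3 GHz, matching the hwmon convention that freqN_input is given in hertz.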
/**
* DOC: hwmon
@@ -1530,6 +1809,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
*
* - GPU fan
*
+ * - GPU gfx/compute engine clock
+ *
+ * - GPU memory clock (dGPU only)
+ *
* hwmon interfaces for GPU temperature:
*
* - temp1_input: the on die GPU temperature in millidegrees Celsius
@@ -1574,6 +1857,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
*
* - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
*
+ * hwmon interfaces for GPU clocks:
+ *
+ * - freq1_input: the gfx/compute clock in hertz
+ *
+ * - freq2_input: the memory clock in hertz
+ *
* You can use hwmon tools like sensors to view this information on your system.
*
*/
@@ -1598,6 +1887,10 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
+static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
+static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -1620,6 +1913,10 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
+ &sensor_dev_attr_freq1_input.dev_attr.attr,
+ &sensor_dev_attr_freq1_label.dev_attr.attr,
+ &sensor_dev_attr_freq2_input.dev_attr.attr,
+ &sensor_dev_attr_freq2_label.dev_attr.attr,
NULL
};
@@ -1642,6 +1939,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip fan attributes on APU */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1671,7 +1981,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
@@ -1697,6 +2008,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* no mclk on APUs */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
+ return 0;
+
return effective_mode;
}
@@ -1956,6 +2273,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
@@ -1982,6 +2310,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
if (adev->pm.sysfs_initialized)
@@ -2043,6 +2372,25 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_mclk\n");
return ret;
}
+ if (adev->asic_type >= CHIP_VEGA10) {
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_socclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
+ return ret;
+ }
+ }
+ if (adev->asic_type >= CHIP_VEGA20) {
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_fclk\n");
+ return ret;
+ }
+ }
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
if (ret) {
DRM_ERROR("failed to create device file pp_dpm_pcie\n");
@@ -2065,12 +2413,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
+ if (hwmgr->od_enabled) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_od_clk_voltage\n");
+ return ret;
+ }
}
ret = device_create_file(adev->dev,
&dev_attr_gpu_busy_percent);
@@ -2079,12 +2429,31 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"gpu_busy_level\n");
return ret;
}
+ /* PCIe Perf counters won't work on APU nodes */
+ if (!(adev->flags & AMD_IS_APU)) {
+ ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
+ if (ret) {
+ DRM_ERROR("failed to create device file pcie_bw\n");
+ return ret;
+ }
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
return ret;
}
+ if ((adev->asic_type >= CHIP_VEGA10) &&
+ !(adev->flags & AMD_IS_APU)) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_ppfeatures);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "ppfeatures\n");
+ return ret;
+ }
+ }
+
adev->pm.sysfs_initialized = true;
return 0;
@@ -2092,6 +2461,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
if (adev->pm.dpm_enabled == 0)
return;
@@ -2107,14 +2478,26 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (adev->asic_type >= CHIP_VEGA10) {
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+ }
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (adev->asic_type >= CHIP_VEGA20)
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
+ if (hwmgr->od_enabled)
+ device_remove_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
+ if (!(adev->flags & AMD_IS_APU))
+ device_remove_file(adev->dev, &dev_attr_pcie_bw);
+ if ((adev->asic_type >= CHIP_VEGA10) &&
+ !(adev->flags & AMD_IS_APU))
+ device_remove_file(adev->dev, &dev_attr_ppfeatures);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -2129,7 +2512,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
+ if (ring && ring->sched.ready)
amdgpu_fence_wait_empty(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index e45e929aaab5..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,8 +38,7 @@
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
+#include <linux/dma-fence-array.h>
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -189,6 +188,48 @@ error:
return ERR_PTR(ret);
}
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+ struct dma_fence **fences;
+ unsigned int count;
+ int r;
+
+ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ return 0;
+
+ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ if (r)
+ return r;
+
+ if (count == 0) {
+ /* Now that was unexpected. */
+ } else if (count == 1) {
+ reservation_object_add_excl_fence(obj, fences[0]);
+ dma_fence_put(fences[0]);
+ kfree(fences);
+ } else {
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1), 0,
+ false);
+ if (!array)
+ goto err_fences_put;
+
+ reservation_object_add_excl_fence(obj, &array->base);
+ dma_fence_put(&array->base);
+ }
+
+ return 0;
+
+err_fences_put:
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+}
+
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
@@ -220,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (attach->dev->driver != adev->dev->driver) {
/*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
*/
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(r < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ r = __reservation_object_make_exclusive(bo->tbo.resv);
+ if (r)
goto error_unreserve;
- }
}
/* pin buffer into GTT */
@@ -332,15 +373,13 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
- .map = drm_gem_dmabuf_kmap,
- .unmap = drm_gem_dmabuf_kunmap,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 25d2f3e757f1..3091488cd8cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -67,9 +67,6 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
-
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
@@ -83,13 +80,14 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
-
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ if (adev->psp.ta_fw) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ }
return 0;
}
@@ -118,29 +116,44 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
- int index)
+ struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
+ int index;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ index = atomic_inc_return(&psp->fence_value);
ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
fence_mc_addr, index);
+ if (ret) {
+ atomic_dec(&psp->fence_value);
+ return ret;
+ }
- while (*((unsigned int *)psp->fence_buf) != index) {
+ while (*((unsigned int *)psp->fence_buf) != index)
msleep(1);
- }
- /* the status field must be 0 after FW is loaded */
- if (ucode && psp->cmd_buf_mem->resp.status) {
- DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
- psp->cmd_buf_mem->resp.status, ucode->ucode_id);
- return -EINVAL;
+ /* In some cases the PSP response status is not 0 even though there was
+ * no problem while the command was submitted; some versions of the PSP
+ * firmware do not write 0 to that field.
+ * So during PSP initialization we only print a warning instead of an
+ * error, to avoid breaking hw_init, and we do not return -EINVAL.
+ */
+ if (psp->cmd_buf_mem->resp.status) {
+ if (ucode)
+ DRM_WARN("failed to load ucode id (%d) ",
+ ucode->ucode_id);
+ DRM_WARN("psp command failed and response status is (%d)\n",
+ psp->cmd_buf_mem->resp.status);
}
+ /* get xGMI session id from response buffer */
+ cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
+
if (ucode) {
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
@@ -149,10 +162,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -186,12 +203,12 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 1);
+ psp->fence_buf_mc_addr);
if (ret)
goto failed;
@@ -258,13 +275,197 @@ static int psp_asd_load(struct psp_context *psp)
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 2);
+ psp->fence_buf_mc_addr);
kfree(cmd);
return ret;
}
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+ uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16 KB of memory, aligned to 4 KB, from the frame buffer
+ * (local physical memory) for the XGMI TA <-> driver communication
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+ psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->xgmi_context.initialized = 1;
+ psp->xgmi_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->xgmi_context.initialized)
+ return 0;
+
+ ret = psp_xgmi_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->xgmi_context.initialized = 0;
+
+ /* free xgmi shared memory */
+ amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ if (!psp->adev->psp.ta_fw)
+ return -ENOENT;
+
+ if (!psp->xgmi_context.initialized) {
+ ret = psp_xgmi_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* Load XGMI TA */
+ ret = psp_xgmi_load(psp);
+ if (ret)
+ return ret;
+
+ /* Initialize XGMI session */
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+ return ret;
+}
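A hedged sketch of how a further TA request could be issued once the session above is up; TA_COMMAND_XGMI__EXAMPLE is a placeholder, only the shared-buffer plus psp_xgmi_invoke() pattern is taken from the code above.

	struct ta_xgmi_shared_memory *xgmi_cmd =
		(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	int ret;

	memset(xgmi_cmd, 0, sizeof(*xgmi_cmd));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__EXAMPLE;	/* placeholder command id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* on success the TA's reply is read back from the same shared buffer */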
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -292,9 +493,110 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
return ret;
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+ /* Warn about an XGMI session initialization failure
+ * instead of stopping driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+ return 0;
+}
+
+static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
+ enum psp_gfx_fw_type *type)
+{
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ *type = GFX_FW_TYPE_SDMA0;
+ break;
+ case AMDGPU_UCODE_ID_SDMA1:
+ *type = GFX_FW_TYPE_SDMA1;
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ *type = GFX_FW_TYPE_CP_CE;
+ break;
+ case AMDGPU_UCODE_ID_CP_PFP:
+ *type = GFX_FW_TYPE_CP_PFP;
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ *type = GFX_FW_TYPE_CP_ME;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ *type = GFX_FW_TYPE_CP_MEC;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ *type = GFX_FW_TYPE_CP_MEC_ME1;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ *type = GFX_FW_TYPE_CP_MEC;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ *type = GFX_FW_TYPE_CP_MEC_ME2;
+ break;
+ case AMDGPU_UCODE_ID_RLC_G:
+ *type = GFX_FW_TYPE_RLC_G;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+ break;
+ case AMDGPU_UCODE_ID_SMC:
+ *type = GFX_FW_TYPE_SMU;
+ break;
+ case AMDGPU_UCODE_ID_UVD:
+ *type = GFX_FW_TYPE_UVD;
+ break;
+ case AMDGPU_UCODE_ID_UVD1:
+ *type = GFX_FW_TYPE_UVD1;
+ break;
+ case AMDGPU_UCODE_ID_VCE:
+ *type = GFX_FW_TYPE_VCE;
+ break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ *type = GFX_FW_TYPE_DMCU_ERAM;
+ break;
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ *type = GFX_FW_TYPE_DMCU_ISR;
+ break;
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ return -EINVAL;
+ }
+
return 0;
}
+static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
+ struct psp_gfx_cmd_resp *cmd)
+{
+ int ret;
+ uint64_t fw_mem_mc_addr = ucode->mc_addr;
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
+
+ ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
+ if (ret)
+ DRM_ERROR("Unknown firmware type\n");
+
+ return ret;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
@@ -316,12 +618,12 @@ static int psp_np_fw_load(struct psp_context *psp)
/*skip ucode loading in SRIOV VF */
continue;
- ret = psp_prep_cmd_buf(ucode, psp->cmd);
+ ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
if (ret)
return ret;
ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
- psp->fence_buf_mc_addr, i + 3);
+ psp->fence_buf_mc_addr);
if (ret)
return ret;
@@ -340,8 +642,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
@@ -416,10 +720,6 @@ static int psp_hw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
-
mutex_lock(&adev->firmware.mutex);
/*
* This sequence is just used on hw_init only once, no need on
@@ -449,8 +749,9 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1)
+ psp_xgmi_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -476,8 +777,14 @@ static int psp_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1) {
+ ret = psp_xgmi_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate xgmi ta\n");
+ return ret;
+ }
+ }
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
@@ -494,9 +801,6 @@ static int psp_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
-
DRM_INFO("PSP is resuming...\n");
mutex_lock(&adev->firmware.mutex);
@@ -532,11 +836,6 @@ static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
{
struct amdgpu_firmware_info *ucode = NULL;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_INFO("firmware is not loaded by PSP\n");
- return true;
- }
-
if (!adev->firmware.fw_size)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8b8720e9c3f0..2ef98cc755d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,17 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
struct psp_context;
+struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
enum psp_ring_type
@@ -62,8 +65,6 @@ struct psp_funcs
int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
- int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -80,12 +81,21 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
- uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+ int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+ int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
+ bool (*support_vmr_ring)(struct psp_context *psp);
+};
+
+struct psp_xgmi_context {
+ uint8_t initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *xgmi_shared_bo;
+ uint64_t xgmi_shared_mc_addr;
+ void *xgmi_shared_buf;
};
struct psp_context
@@ -96,7 +106,7 @@ struct psp_context
const struct psp_funcs *funcs;
- /* fence buffer */
+ /* firmware buffer */
struct amdgpu_bo *fw_pri_bo;
uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
@@ -134,6 +144,16 @@ struct psp_context
struct amdgpu_bo *cmd_buf_bo;
uint64_t cmd_buf_mc_addr;
struct psp_gfx_cmd_resp *cmd_buf_mem;
+
+ /* fence value associated with cmd buffer */
+ atomic_t fence_value;
+
+ /* xgmi ta firmware and buffer */
+ const struct firmware *ta_fw;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_ucode_size;
+ uint8_t *ta_xgmi_start_addr;
+ struct psp_xgmi_context xgmi_context;
};
struct amdgpu_psp_funcs {
@@ -141,24 +161,19 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
+struct psp_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
struct psp_xgmi_topology_info {
- /* Generated by PSP to identify the GPU instance within xgmi connection */
- uint64_t device_id;
- /*
- * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
- * connection vector topology, but not access enable the connections
- * if some or all bits are set to 1, driver indicates it want to retrieve the
- * current xgmi topology and access enable the link to GPU[i] associated
- * with the bit position in the vector.
- * On return,: bits indicated which xgmi links are present/active depending
- * on the value passed in. The relative bit offset for the relative GPU index
- * within the hive is always marked active.
- */
- uint32_t connection_mask;
- uint32_t reserved; /* must be 0 */
+ uint32_t num_nodes;
+ struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
-#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
@@ -175,12 +190,14 @@ struct psp_xgmi_topology_info {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+ ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
- ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
@@ -199,6 +216,7 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85ec147d..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->ready = false;
+ ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
debugfs_remove(ring->ent);
#endif
}
+
+/**
+ * amdgpu_ring_test_helper - test the ring and set the scheduler readiness status
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets the scheduler readiness status accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+ ring->name, r);
+ else
+ DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+ ring->name);
+
+ ring->sched.ready = !r;
+ return r;
+}
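A hedged sketch of the intended call pattern (the surrounding hw_init context is hypothetical): IP blocks call the helper instead of setting the old ring->ready flag by hand, so the test result and the scheduler readiness flag can no longer diverge.

	/* in an IP block's hw_init, once the ring has been programmed */
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;	/* sched.ready has already been cleared by the helper */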
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301ce454..d7fae2676269 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 21
+#define AMDGPU_MAX_RINGS 23
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+ uint32_t flags);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
uint64_t gpu_addr;
uint64_t ptr_mask;
uint32_t buf_mask;
- bool ready;
u32 idx;
u32 me;
u32 pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring->count_dw -= count_dw;
}
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 000000000000..c8793e6cc3c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if the RLC is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+ if (adev->gfx.rlc.in_safe_mode)
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->set_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = true;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if the RLC is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+ if (!(adev->gfx.rlc.in_safe_mode))
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->unset_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = false;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: the size of the save/restore block, in dwords
+ *
+ * Allocate and fill the RLC save/restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 i;
+ int r;
+
+ /* allocate save restore block */
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* write the sr buffer */
+ src_ptr = adev->gfx.rlc.reg_list;
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and fill the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+ volatile u32 *dst_ptr;
+ u32 dws;
+ int r;
+
+ /* allocate clear state block */
+ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and fill the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cp table */
+ amdgpu_gfx_rlc_setup_cp_table(adev);
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Copy the CP firmware jump tables into the CP table buffer.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save/restore block, the clear state
+ * block and the CP/jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ }
+
+ /* clear state block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+
+ /* jump table block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 000000000000..49a8ab52113b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+ bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev);
+ void (*unset_safe_mode)(struct amdgpu_device *adev);
+ int (*init)(struct amdgpu_device *adev);
+ u32 (*get_csb_size)(struct amdgpu_device *adev);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+ void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+ /* for power gating */
+ struct amdgpu_bo *save_restore_obj;
+ uint64_t save_restore_gpu_addr;
+ volatile uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+ struct amdgpu_bo *clear_state_obj;
+ uint64_t clear_state_gpu_addr;
+ volatile uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct amdgpu_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode;
+ const struct amdgpu_rlc_funcs *funcs;
+
+ /* for firmware data */
+ u32 save_and_restore_offset;
+ u32 clear_state_descriptor_offset;
+ u32 avail_scratch_ram_locations;
+ u32 reg_restore_list_size;
+ u32 reg_list_format_start;
+ u32 reg_list_format_separate_start;
+ u32 starting_offsets_start;
+ u32 reg_list_format_size_bytes;
+ u32 reg_list_size_bytes;
+ u32 reg_list_format_direct_reg_list_length;
+ u32 save_restore_list_cntl_size_bytes;
+ u32 save_restore_list_gpm_size_bytes;
+ u32 save_restore_list_srm_size_bytes;
+
+ u32 *register_list_format;
+ u32 *register_restore;
+ u8 *save_restore_list_cntl;
+ u8 *save_restore_list_gpm;
+ u8 *save_restore_list_srm;
+
+ bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
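As a reading aid, here is a hedged sketch of how a GFX IP backend is expected to plug into these common helpers; every gfx_vN_0_* name below is a placeholder invented for the example, not code from this patch.

/* hypothetical per-ASIC callback table feeding the shared RLC helpers */
static const struct amdgpu_rlc_funcs gfx_vN_0_rlc_funcs = {
	.is_rlc_enabled = gfx_vN_0_is_rlc_enabled,
	.set_safe_mode = gfx_vN_0_set_safe_mode,
	.unset_safe_mode = gfx_vN_0_unset_safe_mode,
	.get_csb_size = gfx_vN_0_get_csb_size,
	.get_csb_buffer = gfx_vN_0_get_csb_buffer,
	.get_cp_table_num = gfx_vN_0_get_cp_table_num,
};

static int gfx_vN_0_rlc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gfx.rlc.funcs = &gfx_vN_0_rlc_funcs;

	/* clear state block via the shared helper */
	r = amdgpu_gfx_rlc_init_csb(adev);
	if (r)
		return r;

	/* CP jump tables, only if this ASIC actually needs them */
	if (adev->gfx.rlc.cp_table_size)
		r = amdgpu_gfx_rlc_init_cpt(adev);

	return r;
}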
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 12f2bf97611f..bfaf5c6323be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %llu",
+ seq_printf(m, " protected by 0x%016llx on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 1cafe8d83a4d..0767a93e4d91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -54,16 +54,20 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
enum drm_sched_priority priority)
{
struct file *filp = fget(fd);
- struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
+ int r;
if (!filp)
return -EINVAL;
- file = filp->private_data;
- fpriv = file->driver_priv;
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
@@ -72,6 +76,39 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
+ int fd,
+ unsigned ctx_id,
+ enum drm_sched_priority priority)
+{
+ struct file *filp = fget(fd);
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ int r;
+
+ if (!filp)
+ return -EINVAL;
+
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
+ ctx = amdgpu_ctx_get(fpriv, ctx_id);
+
+ if (!ctx) {
+ fput(filp);
+ return -EINVAL;
+ }
+
+ amdgpu_ctx_priority_override(ctx, priority);
+ amdgpu_ctx_put(ctx);
+ fput(filp);
+
+ return 0;
+}
+
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -81,7 +118,7 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
@@ -90,6 +127,12 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
args->in.fd,
priority);
break;
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_context_priority_override(adev,
+ args->in.fd,
+ args->in.ctx_id,
+ priority);
+ break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
r = -EINVAL;
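A hypothetical user-space caller of the new op, sketched with libdrm; the drm_amdgpu_sched_in field names (op, fd, ctx_id, priority) and the include paths are assumed to match the UAPI half of this series rather than taken from this hunk.

#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

/* privileged helper: bump one context of another process to high priority */
static int override_ctx_priority(int master_fd, int victim_fd, unsigned ctx_id)
{
	union drm_amdgpu_sched args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = victim_fd;		/* DRM fd of the targeted process */
	args.in.ctx_id = ctx_id;	/* override just this context */
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	return drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
}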
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index bc9244b429ef..115bb0c99b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -28,17 +28,31 @@
* GPU SDMA IP block helpers function.
*/
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (&adev->sdma.instance[i].ring == ring)
- break;
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page)
+ return &adev->sdma.instance[i];
- if (i < AMDGPU_MAX_SDMA_INSTANCES)
- return &adev->sdma.instance[i];
- else
- return NULL;
+ return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page) {
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 500113ec65ca..16b1a6ae5ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
uint32_t feature_version;
struct amdgpu_ring ring;
+ struct amdgpu_ring page;
bool burst_nop;
};
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
uint32_t srbm_soft_reset;
+ bool has_page_queue;
};
/*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
#endif
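A minimal sketch of a caller (assumed, not part of the patch) showing why the lookup helper was renamed and extended: it now resolves both the regular ring and the new page queue ring back to their owning SDMA instance.

static bool example_ring_uses_sdma_burst_nop(struct amdgpu_ring *ring)
{
	struct amdgpu_sdma_instance *sdma =
		amdgpu_sdma_get_instance_from_ring(ring);
	uint32_t instance;

	/* the index lookup also matches the new page queue ring */
	if (!amdgpu_sdma_get_index_from_ring(ring, &instance))
		DRM_DEBUG("ring belongs to SDMA instance %u\n", instance);

	return sdma && sdma->burst_nop;
}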
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e9bf70e2ac51..d3ca2424b5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -76,9 +76,10 @@ TRACE_EVENT(amdgpu_mm_wreg,
);
TRACE_EVENT(amdgpu_iv,
- TP_PROTO(struct amdgpu_iv_entry *iv),
- TP_ARGS(iv),
+ TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
+ TP_ARGS(ih, iv),
TP_STRUCT__entry(
+ __field(unsigned, ih)
__field(unsigned, client_id)
__field(unsigned, src_id)
__field(unsigned, ring_id)
@@ -90,6 +91,7 @@ TRACE_EVENT(amdgpu_iv,
__array(unsigned, src_data, 4)
),
TP_fast_assign(
+ __entry->ih = ih;
__entry->client_id = iv->client_id;
__entry->src_id = iv->src_id;
__entry->ring_id = iv->ring_id;
@@ -103,8 +105,9 @@ TRACE_EVENT(amdgpu_iv,
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
- __entry->client_id, __entry->src_id,
+ TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
+ "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
+ __entry->ih, __entry->client_id, __entry->src_id,
__entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pasid,
__entry->src_data[0], __entry->src_data[1],
@@ -218,6 +221,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(u32, pasid)
+ __string(ring, ring->name)
__field(u32, ring)
__field(u32, vmid)
__field(u32, vm_hub)
@@ -227,14 +231,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name)
__entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->pasid, __entry->ring, __entry->vmid,
+ TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->pasid, __get_str(ring), __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -366,20 +370,20 @@ TRACE_EVENT(amdgpu_vm_flush,
uint64_t pd_addr),
TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
- __field(u32, ring)
+ __string(ring, ring->name)
__field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name)
__entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
),
- TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vmid,
+ TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+ __get_str(ring), __entry->vmid,
__entry->vm_hub,__entry->pd_addr)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a44fc12ae1f9..73e71e61dc99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- /* ensure reference is false in case init fails */
- adev->mman.mem_global_referenced = false;
-
- global_ref = &adev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &amdgpu_ttm_mem_global_init;
- global_ref->release = &amdgpu_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- goto error_mem;
- }
-
- adev->mman.bo_global_ref.mem_glob =
- adev->mman.mem_global_ref.object;
- global_ref = &adev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- goto error_bo;
- }
-
- mutex_init(&adev->mman.gtt_window_lock);
-
- adev->mman.mem_global_referenced = true;
-
- return 0;
-
-error_bo:
- drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
- return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
- if (adev->mman.mem_global_referenced) {
- mutex_destroy(&adev->mman.gtt_window_lock);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- drm_global_item_unref(&adev->mman.mem_global_ref);
- adev->mman.mem_global_referenced = false;
- }
-}
-
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -1640,7 +1546,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
- .access_memory = &amdgpu_ttm_access_memory
+ .access_memory = &amdgpu_ttm_access_memory,
+ .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};
/*
@@ -1758,14 +1665,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
int r;
u64 vis_vram_limit;
- /* initialize global references for vram/gtt */
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
+ mutex_init(&adev->mman.gtt_window_lock);
+
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
- adev->mman.bo_global_ref.ref.object,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -1853,7 +1756,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ 4, AMDGPU_GEM_DOMAIN_GDS,
&adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -1866,7 +1769,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ 1, AMDGPU_GEM_DOMAIN_GWS,
&adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -1879,7 +1782,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ 1, AMDGPU_GEM_DOMAIN_OA,
&adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -1922,7 +1825,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2069,7 +1971,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->ready) {
+ if (direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index fe8f276e9811..b5b2d101f7db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,8 +39,6 @@
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index aa6641b944a0..7ac25a1c7853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 {
};
/* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_offset_bytes;
+ uint32_t ta_xgmi_size_bytes;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_offset_bytes;
+ uint32_t ta_ras_size_bytes;
+};
+
+/* version_major=1, version_minor=0 */
struct gfx_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
struct mc_firmware_header_v1_0 mc;
struct smc_firmware_header_v1_0 smc;
struct psp_firmware_header_v1_0 psp;
+ struct ta_firmware_header_v1_0 ta;
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6beab7..4e5d13e41f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust the northbridge (nb) memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
@@ -1243,30 +1245,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence;
long r;
- uint32_t ip_instance = ring->me;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a41060f..5eb63288d157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
struct drm_sched_entity entity;
struct delayed_work idle_work;
unsigned harvest_config;
+ /* store image width to adjust the northbridge (nb) memory pstate */
+ unsigned decode_image_width;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f54073818..c021b114c8a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ out:
* @ib: the IB to execute
*
*/
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index a1f209eed4c4..30ea54dd9117 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27da13df2f11..ecf6f96df2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+ if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
- new_state.fw_based = adev->vcn.pause_state.fw_based;
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
- new_state.jpeg = adev->vcn.pause_state.jpeg;
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
amdgpu_vcn_pause_dpg_mode(adev, &new_state);
}
@@ -425,11 +435,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +449,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -570,30 +573,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
-
error:
return r;
}
@@ -606,11 +599,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -620,14 +611,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -742,27 +727,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -778,11 +755,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +770,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
return r;
}
@@ -856,21 +824,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r = 0;
r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto error;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto error;
- } else
+ } else {
r = 0;
+ }
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +844,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout)
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- else {
- DRM_ERROR("ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
dma_fence_put(fence);
-
error:
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358aa0597..462a04e0f5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
#include "amdgpu.h"
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
- uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
- addr -= AMDGPU_VA_RESERVED_SIZE;
- addr = amdgpu_gmc_sign_extend(addr);
-
- return addr;
-}
-
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
- int r;
- void *ptr;
-
- r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr, &ptr);
- if (r)
- return r;
-
- memset(ptr, 0, AMDGPU_CSA_SIZE);
- return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr,
- NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va)
-{
- uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
- int r;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &adev->virt.csa_obj->tbo;
- csa_tv.shared = true;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
- return r;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!*bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
- DRM_ERROR("failed to create bo_va for static CSA\n");
- return -ENOMEM;
- }
-
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- AMDGPU_CSA_SIZE);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- ttm_eu_backoff_reservation(&ticket, &list);
- return 0;
-}
-
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_read;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_write;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_kiq:
+ pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
* @amdgpu: amdgpu device.
@@ -390,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amdgim_pf2vf_info_header *)(
+ (struct amd_sriov_msg_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac113a3a9..722deefc0a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
- struct amdgim_pf2vf_info_header *p_pf2vf;
- struct amdgim_vf2pf_info_header *p_vf2pf;
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 {
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 {
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte. */
uint32_t size;
/*version of this structure, written by the guest */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
@@ -238,7 +242,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
struct amdgpu_virt {
uint32_t caps;
struct amdgpu_bo *csa_obj;
- uint64_t csa_vmid0_addr;
bool chained_ib_support;
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
@@ -251,8 +254,6 @@ struct amdgpu_virt {
uint32_t gim_feature;
};
-#define AMDGPU_CSA_SIZE (8 * 1024)
-
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -277,17 +278,13 @@ static inline bool is_virtual_machine(void)
#endif
}
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
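An illustrative caller of the new KIQ write-and-wait helper; the function name, register arguments and bit layout are assumptions for the sketch, not taken from this patch. The point is that under SR-IOV the request write and the ack poll are emitted as a single KIQ submission instead of two MMIO accesses.

static void example_sriov_flush_tlb(struct amdgpu_device *adev,
				    uint32_t req_reg, uint32_t ack_reg,
				    unsigned int vmid)
{
	uint32_t bit = 1 << vmid;	/* hypothetical per-VMID invalidate/ack bit */

	/* write 'bit' to req_reg, then wait until (ack_reg & bit) == bit */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg, bit, bit);
}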
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0877ff9a9594..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -107,14 +107,6 @@ struct amdgpu_pte_update_params {
* DMA addresses to use for mapping, used during VM update by CPU
*/
dma_addr_t *pages_addr;
-
- /**
- * @kptr:
- *
- * Kernel pointer of PD/PT BO that needs to be updated,
- * used during VM update by CPU
- */
- void *kptr;
};
/**
@@ -617,11 +609,34 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
+void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
+{
+ struct amdgpu_bo *abo;
+ struct amdgpu_vm_bo_base *bo_base;
+
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
+
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+ return;
+
+ abo = ttm_to_amdgpu_bo(bo);
+ if (!abo->parent)
+ return;
+ for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+ vm->bulk_moveable = false;
+ }
+
+}
/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
@@ -637,12 +652,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
+#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
+#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
@@ -773,10 +790,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -802,15 +815,22 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
addr += ats_entries * 8;
}
- if (entries)
+ if (entries) {
+ uint64_t value = 0;
+
+ /* Workaround for fault priority problem on GMC9 */
+ if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
+ value = AMDGPU_PTE_EXECUTABLE;
+
amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- entries, 0, 0);
+ entries, 0, value);
+ }
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_FENCE_OWNER_KFD, false);
if (r)
goto error_free;
@@ -850,9 +870,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->size = amdgpu_vm_bo_size(adev, level);
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
- adev->flags & AMD_IS_APU)
- bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -932,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (r)
return r;
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
-
if (vm->use_cpu_for_update) {
r = amdgpu_bo_kmap(pt, NULL);
if (r)
@@ -948,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+
+ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+ if (r)
+ goto error_free_pt;
}
return 0;
@@ -1317,31 +1334,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
-
-/**
- * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- * @owner: fence owner
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- void *owner)
-{
- struct amdgpu_sync sync;
- int r;
-
- amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
- r = amdgpu_sync_wait(&sync, true);
- amdgpu_sync_free(&sync);
-
- return r;
-}
-
/**
* amdgpu_vm_update_func - helper to call update function
*
@@ -1436,7 +1428,8 @@ restart:
params.adev = adev;
if (vm->use_cpu_for_update) {
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_VM, true);
if (unlikely(r))
return r;
@@ -1509,20 +1502,27 @@ error:
}
/**
- * amdgpu_vm_update_huge - figure out parameters for PTE updates
+ * amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
*/
-static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo, unsigned level,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
+static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo, unsigned level,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint64_t flags)
{
if (level != AMDGPU_VM_PTB) {
flags |= AMDGPU_PDE_PTE;
amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
+
+ } else if (params->adev->asic_type >= CHIP_VEGA10 &&
+ !(flags & AMDGPU_PTE_VALID) &&
+ !(flags & AMDGPU_PTE_PRT)) {
+
+ /* Workaround for fault priority problem on GMC9 */
+ flags |= AMDGPU_PTE_EXECUTABLE;
}
amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
@@ -1679,9 +1679,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
- amdgpu_vm_update_huge(params, pt, cursor.level,
- pe_start, dst, nptes, incr,
- flags | AMDGPU_PTE_FRAG(frag));
+ amdgpu_vm_update_flags(params, pt, cursor.level,
+ pe_start, dst, nptes, incr,
+ flags | AMDGPU_PTE_FRAG(frag));
pe_start += nptes * 8;
dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
@@ -1750,22 +1750,29 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.adev = adev;
params.vm = vm;
- /* sync to everything on unmapping */
+ /* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+ owner = AMDGPU_FENCE_OWNER_KFD;
if (vm->use_cpu_for_update) {
/* params.src is used as flag to indicate system Memory */
if (pages_addr)
params.src = ~0;
- /* Wait for PT BOs to be free. PTs share the same resv. object
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
* as the root PD BO
*/
- r = amdgpu_vm_wait_pd(adev, vm, owner);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
if (unlikely(r))
return r;
+ /* Wait for any BO move to be completed */
+ if (exclusive) {
+ r = dma_fence_wait(exclusive, true);
+ if (unlikely(r))
+ return r;
+ }
+
params.func = amdgpu_vm_cpu_set_ptes;
params.pages_addr = pages_addr;
return amdgpu_vm_update_ptes(&params, start, last + 1,
@@ -1779,13 +1786,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/*
* reserve space for two commands every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
- *
- * The second command is for the shadow pagetables.
*/
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
+
+ /* The second command is for the shadow pagetables. */
if (vm->root.base.bo->shadow)
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
- else
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
+ ncmds *= 2;
/* padding, etc. */
ndw = 64;
@@ -1804,10 +1810,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += ncmds * 10;
/* extra commands for begin/end fragments */
+ ncmds = 2 * adev->vm_manager.fragment_size;
if (vm->root.base.bo->shadow)
- ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
- else
- ndw += 2 * 10 * adev->vm_manager.fragment_size;
+ ncmds *= 2;
+
+ ndw += 10 * ncmds;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1844,10 +1851,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3013,7 +3016,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
@@ -3028,13 +3031,18 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
if (r)
goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
@@ -3057,7 +3065,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3140,7 +3147,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->pasid) {
@@ -3270,42 +3277,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
@@ -3406,14 +3377,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
+ unsigned long flags;
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..81ff8177f092 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
@@ -368,4 +363,6 @@ int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
+void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 897afbb348c1..407dd16cc35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
*/
#include <linux/list.h>
#include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -31,89 +31,154 @@ static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
-};
-
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+ return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
int i;
struct amdgpu_hive_info *tmp;
if (!adev->gmc.xgmi.hive_id)
return NULL;
+
+ mutex_lock(&xgmi_mutex);
+
for (i = 0 ; i < hive_count; ++i) {
tmp = &xgmi_hives[i];
- if (tmp->hive_id == adev->gmc.xgmi.hive_id)
+ if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
+ if (lock)
+ mutex_lock(&tmp->hive_lock);
+ mutex_unlock(&xgmi_mutex);
return tmp;
+ }
}
- if (i >= AMDGPU_MAX_XGMI_HIVE)
+ if (i >= AMDGPU_MAX_XGMI_HIVE) {
+ mutex_unlock(&xgmi_mutex);
return NULL;
+ }
/* initialize new hive if not exist */
tmp = &xgmi_hives[hive_count++];
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
+ mutex_init(&tmp->hive_lock);
+ mutex_init(&tmp->reset_lock);
+ if (lock)
+ mutex_lock(&tmp->hive_lock);
+
+ mutex_unlock(&xgmi_mutex);
+
return tmp;
}
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret = -EINVAL;
+
+ /* Each psp needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ hive->number_devices,
+ &hive->topology_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+
+ return ret;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+ struct psp_xgmi_topology_info *hive_topology;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
- struct amdgpu_device *tmp_adev;
+ struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
- adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
-
- memset(&tmp_topology[0], 0, sizeof(tmp_topology));
- mutex_lock(&xgmi_mutex);
- hive = amdgpu_get_xgmi_hive(adev);
- if (!hive)
- goto exit;
- list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
- list_for_each_entry(entry, &hive->device_list, head)
- tmp_topology[count++].device_id = entry->device_id;
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
- ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
- "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.device_id,
- adev->gmc.xgmi.hive_id, ret);
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev, 1);
+ if (!hive) {
+ ret = -EINVAL;
+ dev_err(adev->dev,
+ "XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
+ adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
goto exit;
}
- /* Each psp need to set the latest topology */
+
+ hive_topology = &hive->topology_info;
+
+ list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
+ list_for_each_entry(entry, &hive->device_list, head)
+ hive_topology->nodes[count++].node_id = entry->node_id;
+ hive->number_devices = count;
+
+ /* Each psp needs to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
if (ret) {
dev_err(tmp_adev->dev,
- "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.device_id,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ /* TODO: continue if some nodes fail, or disable the whole hive */
break;
}
}
- if (!ret)
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id,
- adev->gmc.xgmi.hive_id);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ break;
+ }
+
+ dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+
+ mutex_unlock(&hive->hive_lock);
exit:
- mutex_unlock(&xgmi_mutex);
return ret;
}
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ hive = amdgpu_get_xgmi_hive(adev, 1);
+ if (!hive)
+ return;
+ if (!(hive->number_devices--)) {
+ mutex_destroy(&hive->hive_lock);
+ mutex_destroy(&hive->reset_lock);
+ } else {
+ mutex_unlock(&hive->hive_lock);
+ }
+}
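
A minimal usage sketch of the hive helpers added above; the wrapper name and the -ENODEV return are assumptions for illustration, while amdgpu_get_xgmi_hive(adev, 1) returning with hive_lock held, the device_list walk, and amdgpu_xgmi_update_topology() follow the code in this hunk.

/* Illustrative only: take the per-hive lock around a hive-wide operation. */
static int example_walk_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *tmp_adev;
	int r = 0;

	hive = amdgpu_get_xgmi_hive(adev, 1);	/* returns with hive_lock held */
	if (!hive)
		return -ENODEV;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		r = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (r)
			break;
	}

	mutex_unlock(&hive->hive_lock);
	return r;
}
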
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..14bc60664159
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct psp_xgmi_topology_info topology_info;
+ int number_devices;
+ struct mutex hive_lock,
+ reset_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index e9934de1b9cf..dd30f4e61a8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
+#include <drm/drm_util.h>
+
#define ATOM_DEBUG
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
deleted file mode 100644
index 79220a91abe3..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ /dev/null
@@ -1,6844 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_ucode.h"
-#include "cikd.h"
-#include "amdgpu_dpm.h"
-#include "ci_dpm.h"
-#include "gfx_v7_0.h"
-#include "atom.h"
-#include "amd_pcie.h"
-#include <linux/seq_file.h>
-
-#include "smu/smu_7_0_1_d.h"
-#include "smu/smu_7_0_1_sh_mask.h"
-
-#include "dce/dce_8_0_d.h"
-#include "dce/dce_8_0_sh_mask.h"
-
-#include "bif/bif_4_1_d.h"
-#include "bif/bif_4_1_sh_mask.h"
-
-#include "gca/gfx_7_2_d.h"
-#include "gca/gfx_7_2_sh_mask.h"
-
-#include "gmc/gmc_7_1_d.h"
-#include "gmc/gmc_7_1_sh_mask.h"
-
-MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
-MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
-MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
-MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define SMC_RAM_END 0x40000
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-static const struct amd_pm_funcs ci_dpm_funcs;
-
-static const struct ci_pt_defaults defaults_hawaii_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
- { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
- { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_hawaii_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
- { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
- { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_bonaire_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
-};
-
-#if 0
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
- { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
- { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-#endif
-
-static const struct ci_pt_defaults defaults_saturn_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
- { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
- { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-#if 0
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
- { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
- { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-#endif
-
-static const struct ci_pt_config_reg didt_config_ci[] =
-{
- { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0xFFFFFFFF }
-};
-
-static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
-{
- return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
-}
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
- u32 arb_freq_src, u32 arb_freq_dest)
-{
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 burst_time;
- u32 mc_cg_config;
-
- switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
- burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
- MC_ARB_BURST_TIME__STATE0__SHIFT;
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
- burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
- MC_ARB_BURST_TIME__STATE1__SHIFT;
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
- WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
- ~MC_ARB_BURST_TIME__STATE0_MASK);
- break;
- case MC_CG_ARB_FREQ_F1:
- WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
- ~MC_ARB_BURST_TIME__STATE1_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
- WREG32(mmMC_CG_CONFIG, mc_cg_config);
- WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
- ~MC_ARB_CG__CG_ARB_REQ_MASK);
-
- return 0;
-}
-
-static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
-{
- u8 mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
- return mc_para_index;
-}
-
-static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
-{
- u8 mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 60000) / 5000);
- }
- return mc_para_index;
-}
-
-static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
- u32 max_voltage_steps,
- struct atom_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps)
- return;
-
- diff = voltage_table->count - max_voltage_steps;
-
- for (i = 0; i < max_voltage_steps; i++)
- voltage_table->entries[i] = voltage_table->entries[i + diff];
-
- voltage_table->count = max_voltage_steps;
-}
-
-static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
-static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
-static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
- u32 target_tdp);
-static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter);
-static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
-static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-
-static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
-{
- struct ci_ps *ps = rps->ps_priv;
-
- return ps;
-}
-
-static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- switch (adev->pdev->device) {
- case 0x6649:
- case 0x6650:
- case 0x6651:
- case 0x6658:
- case 0x665C:
- case 0x665D:
- default:
- pi->powertune_defaults = &defaults_bonaire_xt;
- break;
- case 0x6640:
- case 0x6641:
- case 0x6646:
- case 0x6647:
- pi->powertune_defaults = &defaults_saturn_xt;
- break;
- case 0x67B8:
- case 0x67B0:
- pi->powertune_defaults = &defaults_hawaii_xt;
- break;
- case 0x67BA:
- case 0x67B1:
- pi->powertune_defaults = &defaults_hawaii_pro;
- break;
- case 0x67A0:
- case 0x67A1:
- case 0x67A2:
- case 0x67A8:
- case 0x67A9:
- case 0x67AA:
- case 0x67B9:
- case 0x67BE:
- pi->powertune_defaults = &defaults_bonaire_xt;
- break;
- }
-
- pi->dte_tj_offset = 0;
-
- pi->caps_power_containment = true;
- pi->caps_cac = false;
- pi->caps_sq_ramping = false;
- pi->caps_db_ramping = false;
- pi->caps_td_ramping = false;
- pi->caps_tcp_ramping = false;
-
- if (pi->caps_power_containment) {
- pi->caps_cac = true;
- if (adev->asic_type == CHIP_HAWAII)
- pi->enable_bapm_feature = false;
- else
- pi->enable_bapm_feature = true;
- pi->enable_tdc_limit_feature = true;
- pi->enable_pkg_pwr_tracking_feature = true;
- }
-}
-
-static u8 ci_convert_to_vid(u16 vddc)
-{
- return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
-}
-
-static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
- u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
- u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
- u32 i;
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
- return -EINVAL;
- if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
- return -EINVAL;
- if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
- return -EINVAL;
-
- for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
- hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
- hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
- } else {
- lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
- hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
- }
- }
- return 0;
-}
-
-static int ci_populate_vddc_vid(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *vid = pi->smc_powertune_table.VddCVid;
- u32 i;
-
- if (pi->vddc_voltage_table.count > 8)
- return -EINVAL;
-
- for (i = 0; i < pi->vddc_voltage_table.count; i++)
- vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
-
- return 0;
-}
-
-static int ci_populate_svi_load_line(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
-
- pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
- pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
- pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
- pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int ci_populate_tdc_limit(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- u16 tdc_limit;
-
- tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
- pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
- pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- pt_defaults->tdc_vddc_throttle_release_limit_perc;
- pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
-
- return 0;
-}
-
-static int ci_populate_dw8(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, PmFuseTable) +
- offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
- (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
- pi->sram_end);
- if (ret)
- return -EINVAL;
- else
- pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
-
- return 0;
-}
-
-static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
- (adev->pm.dpm.fan.fan_output_sensitivity == 0))
- adev->pm.dpm.fan.fan_output_sensitivity =
- adev->pm.dpm.fan.default_fan_output_sensitivity;
-
- pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
- cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
-
- return 0;
-}
-
-static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
- u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
- int i, min, max;
-
- min = max = hi_vid[0];
- for (i = 0; i < 8; i++) {
- if (0 != hi_vid[i]) {
- if (min > hi_vid[i])
- min = hi_vid[i];
- if (max < hi_vid[i])
- max = hi_vid[i];
- }
-
- if (0 != lo_vid[i]) {
- if (min > lo_vid[i])
- min = lo_vid[i];
- if (max < lo_vid[i])
- max = lo_vid[i];
- }
- }
-
- if ((min == 0) || (max == 0))
- return -EINVAL;
- pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
- pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
-
- return 0;
-}
-
-static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
- u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
-
- hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
- lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
-
- pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
- pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
-
- return 0;
-}
-
-static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
- int i, j, k;
- const u16 *def1;
- const u16 *def2;
-
- dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
- dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
-
- dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
- dpm_table->GpuTjMax =
- (u8)(pi->thermal_temp_setting.temperature_high / 1000);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
-
- if (ppm) {
- dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
- dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
- } else {
- dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
- dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
- }
-
- dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
- def1 = pt_defaults->bapmti_r;
- def2 = pt_defaults->bapmti_rc;
-
- for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU7_DTE_SOURCES; j++) {
- for (k = 0; k < SMU7_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
- dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
- def1++;
- def2++;
- }
- }
- }
-
- return 0;
-}
-
-static int ci_populate_pm_base(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 pm_fuse_table_offset;
- int ret;
-
- if (pi->caps_power_containment) {
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, pi->sram_end);
- if (ret)
- return ret;
- ret = ci_populate_bapm_vddc_vid_sidd(adev);
- if (ret)
- return ret;
- ret = ci_populate_vddc_vid(adev);
- if (ret)
- return ret;
- ret = ci_populate_svi_load_line(adev);
- if (ret)
- return ret;
- ret = ci_populate_tdc_limit(adev);
- if (ret)
- return ret;
- ret = ci_populate_dw8(adev);
- if (ret)
- return ret;
- ret = ci_populate_fuzzy_fan(adev);
- if (ret)
- return ret;
- ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
- if (ret)
- return ret;
- ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
- if (ret)
- return ret;
- ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
- (u8 *)&pi->smc_powertune_table,
- sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 data;
-
- if (pi->caps_sq_ramping) {
- data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
- if (enable)
- data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
- }
-
- if (pi->caps_db_ramping) {
- data = RREG32_DIDT(ixDIDT_DB_CTRL0);
- if (enable)
- data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_DB_CTRL0, data);
- }
-
- if (pi->caps_td_ramping) {
- data = RREG32_DIDT(ixDIDT_TD_CTRL0);
- if (enable)
- data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TD_CTRL0, data);
- }
-
- if (pi->caps_tcp_ramping) {
- data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
- if (enable)
- data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
- }
-}
-
-static int ci_program_pt_config_registers(struct amdgpu_device *adev,
- const struct ci_pt_config_reg *cac_config_regs)
-{
- const struct ci_pt_config_reg *config_regs = cac_config_regs;
- u32 data;
- u32 cache = 0;
-
- if (config_regs == NULL)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
- cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- } else {
- switch (config_regs->type) {
- case CISLANDS_CONFIGREG_SMC_IND:
- data = RREG32_SMC(config_regs->offset);
- break;
- case CISLANDS_CONFIGREG_DIDT_IND:
- data = RREG32_DIDT(config_regs->offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- data |= cache;
-
- switch (config_regs->type) {
- case CISLANDS_CONFIGREG_SMC_IND:
- WREG32_SMC(config_regs->offset, data);
- break;
- case CISLANDS_CONFIGREG_DIDT_IND:
- WREG32_DIDT(config_regs->offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- cache = 0;
- }
- config_regs++;
- }
- return 0;
-}
-
-static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- if (pi->caps_sq_ramping || pi->caps_db_ramping ||
- pi->caps_td_ramping || pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
-
- if (enable) {
- ret = ci_program_pt_config_registers(adev, didt_config_ci);
- if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
- return ret;
- }
- }
-
- ci_do_enable_didt(adev, enable);
-
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
- }
-
- return 0;
-}
-
-static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (enable) {
- pi->power_containment_features = 0;
- if (pi->caps_power_containment) {
- if (pi->enable_bapm_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- else
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
- }
-
- if (pi->enable_tdc_limit_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- else
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
- }
-
- if (pi->enable_pkg_pwr_tracking_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- } else {
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- u32 default_pwr_limit =
- (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
-
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
- ci_set_power_limit(adev, default_pwr_limit);
- }
- }
- }
- } else {
- if (pi->caps_power_containment && pi->power_containment_features) {
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
- pi->power_containment_features = 0;
- }
- }
-
- return ret;
-}
-
-static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (pi->caps_cac) {
- if (enable) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- pi->cac_enabled = false;
- } else {
- pi->cac_enabled = true;
- }
- } else if (pi->cac_enabled) {
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
- pi->cac_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result = PPSMC_Result_OK;
-
- if (pi->thermal_sclk_dpm_enabled) {
- if (enable)
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
- else
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
- }
-
- if (smc_result == PPSMC_Result_OK)
- return 0;
- else
- return -EINVAL;
-}
-
-static int ci_power_control_set_level(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- s32 adjust_percent;
- s32 target_tdp;
- int ret = 0;
- bool adjust_polarity = false; /* ??? */
-
- if (pi->caps_power_containment) {
- adjust_percent = adjust_polarity ?
- adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
- target_tdp = ((100 + adjust_percent) *
- (s32)cac_tdp_table->configurable_tdp) / 100;
-
- ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
- }
-
- return ret;
-}
-
-static void ci_dpm_powergate_uvd(void *handle, bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->uvd_power_gated = gate;
-
- if (gate) {
- /* stop the UVD block */
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- ci_update_uvd_dpm(adev, gate);
- } else {
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- ci_update_uvd_dpm(adev, gate);
- }
-}
-
-static bool ci_dpm_vblank_too_short(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
-
- /* disable mclk switching if the refresh is >120Hz, even if the
- * blanking period would allow it
- */
- if (amdgpu_dpm_get_vrefresh(adev) > 120)
- return true;
-
- if (vblank_time < switch_limit)
- return true;
- else
- return false;
-
-}
-
-static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
- u32 sclk, mclk;
- int i;
-
- if (rps->vce_active) {
- rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- } else {
- rps->evclk = 0;
- rps->ecclk = 0;
- }
-
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
- ci_dpm_vblank_too_short(adev))
- disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
-
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
- pi->battery_state = true;
- else
- pi->battery_state = false;
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (adev->pm.ac_power == false) {
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk > max_limits->mclk)
- ps->performance_levels[i].mclk = max_limits->mclk;
- if (ps->performance_levels[i].sclk > max_limits->sclk)
- ps->performance_levels[i].sclk = max_limits->sclk;
- }
- }
-
- /* XXX validate the min clocks required for display */
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- } else {
- mclk = ps->performance_levels[0].mclk;
- sclk = ps->performance_levels[0].sclk;
- }
-
- if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
- sclk = adev->pm.pm_display_cfg.min_core_set_clock;
-
- if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
- mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
-
- if (rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
- mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
- }
-
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
-
- if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
- ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
-
- if (disable_mclk_switching) {
- if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
- ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
- } else {
- if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
- ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
- }
-}
-
-static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
- u32 tmp;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- tmp = RREG32_SMC(ixCG_THERMAL_INT);
- tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
- tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
- ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
- WREG32_SMC(ixCG_THERMAL_INT, tmp);
-
-#if 0
- /* XXX: need to figure out how to handle this properly */
- tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
- tmp &= DIG_THERM_DPM_MASK;
- tmp |= DIG_THERM_DPM(high_temp / 1000);
- WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
-#endif
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
- return 0;
-}
-
-static int ci_thermal_enable_alert(struct amdgpu_device *adev,
- bool enable)
-{
- u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- PPSMC_Result result;
-
- if (enable) {
- thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
- CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
- WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- return -EINVAL;
- }
- } else {
- thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
- CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
- >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
- >> CG_FDO_CTRL2__TMIN__SHIFT;
- pi->t_min = tmp;
- pi->fan_ctrl_is_in_default_mode = false;
- }
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
- tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
- tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-}
-
-static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- u32 duty100;
- u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- u16 fdo_min, slope1, slope2;
- u32 reference_clock, tmp;
- int ret;
- u64 tmp64;
-
- if (!pi->fan_table_start) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
-
- if (duty100 == 0) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
- do_div(tmp64, 10000);
- fdo_min = (u16)tmp64;
-
- t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
- t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
-
- pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
- pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
-
- slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
- fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
- fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((u16)duty100);
-
- tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
- >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
- fan_table.TempSrc = (uint8_t)tmp;
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->fan_table_start,
- (u8 *)(&fan_table),
- sizeof(fan_table),
- pi->sram_end);
-
- if (ret) {
- DRM_ERROR("Failed to load fan table to the SMC.");
- adev->pm.dpm.fan.ucode_fan_control = false;
- }
-
- return 0;
-}
-
-static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result ret;
-
- if (pi->caps_od_fuzzy_fan_control_support) {
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_StartFanControl,
- FAN_CONTROL_FUZZY);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetFanPwmMax,
- adev->pm.dpm.fan.default_max_fan_pwm);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- } else {
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_StartFanControl,
- FAN_CONTROL_TABLE);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- pi->fan_is_controlled_by_smc = true;
- return 0;
-}
-
-
-static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
-{
- PPSMC_Result ret;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
- if (ret == PPSMC_Result_OK) {
- pi->fan_is_controlled_by_smc = false;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int ci_dpm_get_fan_speed_percent(void *handle,
- u32 *speed)
-{
- u32 duty, duty100;
- u64 tmp64;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
- duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
- >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)duty * 100;
- do_div(tmp64, duty100);
- *speed = (u32)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
-
-static int ci_dpm_set_fan_speed_percent(void *handle,
- u32 speed)
-{
- u32 tmp;
- u32 duty, duty100;
- u64 tmp64;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (pi->fan_is_controlled_by_smc)
- return -EINVAL;
-
- if (speed > 100)
- return -EINVAL;
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)speed * duty100;
- do_div(tmp64, 100);
- duty = (u32)tmp64;
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
- tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL0, tmp);
-
- return 0;
-}
-
-static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- switch (mode) {
- case AMD_FAN_CTRL_NONE:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
- ci_dpm_set_fan_speed_percent(adev, 100);
- break;
- case AMD_FAN_CTRL_MANUAL:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
- break;
- case AMD_FAN_CTRL_AUTO:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_thermal_start_smc_fan_control(adev);
- break;
- default:
- break;
- }
-}
-
-static u32 ci_dpm_get_fan_control_mode(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->fan_is_controlled_by_smc)
- return AMD_FAN_CTRL_AUTO;
- else
- return AMD_FAN_CTRL_MANUAL;
-}
-
-#if 0
-static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
- u32 *speed)
-{
- u32 tach_period;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
- >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
- if (tach_period == 0)
- return -ENOENT;
-
- *speed = 60 * xclk * 10000 / tach_period;
-
- return 0;
-}
-
-static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
- u32 speed)
-{
- u32 tach_period, tmp;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- if ((speed < adev->pm.fan_min_rpm) ||
- (speed > adev->pm.fan_max_rpm))
- return -EINVAL;
-
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
-
- tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
- tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
- WREG32_SMC(CG_TACH_CTRL, tmp);
-
- ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
-
- return 0;
-}
-#endif
-
-static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (!pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
- tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
- tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
- pi->fan_ctrl_is_in_default_mode = true;
- }
-}
-
-static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ci_fan_ctrl_start_smc_fan_control(adev);
- ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
- }
-}
-
-static void ci_thermal_initialize(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
- tmp |= (adev->pm.fan_pulses_per_revolution - 1)
- << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
- WREG32_SMC(ixCG_TACH_CTRL, tmp);
- }
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
- tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-}
-
-static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
-{
- int ret;
-
- ci_thermal_initialize(adev);
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = ci_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ret = ci_thermal_setup_fan_table(adev);
- if (ret)
- return ret;
- ci_thermal_start_smc_fan_control(adev);
- }
-
- return 0;
-}
-
-static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
-{
- if (!adev->pm.no_fan)
- ci_fan_ctrl_set_default_mode(adev);
-}
-
-static int ci_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- return amdgpu_ci_read_smc_sram_dword(adev,
- pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-
-static int ci_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- return amdgpu_ci_write_smc_sram_dword(adev,
- pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-
-static void ci_init_fps_limits(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
-
- if (pi->caps_fps) {
- u16 tmp;
-
- tmp = 45;
- table->FpsHighT = cpu_to_be16(tmp);
-
- tmp = 30;
- table->FpsLowT = cpu_to_be16(tmp);
- }
-}
-
-static int ci_update_sclk_t(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = 0;
- u32 low_sclk_interrupt_t = 0;
-
- if (pi->caps_sclk_throttle_low_notification) {
- low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
- (u8 *)&low_sclk_interrupt_t,
- sizeof(u32), pi->sram_end);
-
- }
-
- return ret;
-}
-
-static void ci_get_leakage_voltages(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 leakage_id, virtual_voltage_id;
- u16 vddc, vddci;
- int i;
-
- pi->vddc_leakage.count = 0;
- pi->vddci_leakage.count = 0;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
- continue;
- if (vddc != 0 && vddc != virtual_voltage_id) {
- pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
- pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
- pi->vddc_leakage.count++;
- }
- }
- } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
- for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
- virtual_voltage_id,
- leakage_id) == 0) {
- if (vddc != 0 && vddc != virtual_voltage_id) {
- pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
- pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
- pi->vddc_leakage.count++;
- }
- if (vddci != 0 && vddci != virtual_voltage_id) {
- pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
- pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
- pi->vddci_leakage.count++;
- }
- }
- }
- }
-}
-
-static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
- u32 tmp;
-
- switch (sources) {
- case 0:
- default:
- want_thermal_protection = false;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
- break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
- break;
- }
-
- if (want_thermal_protection) {
-#if 0
- /* XXX: need to figure out how to handle this properly */
- tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
- tmp &= DPM_EVENT_SRC_MASK;
- tmp |= DPM_EVENT_SRC(dpm_event_src);
- WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
-#endif
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- if (pi->thermal_protection)
- tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- else
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- } else {
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
-}
-
-static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (enable) {
- if (!(pi->active_auto_throttle_sources & (1 << source))) {
- pi->active_auto_throttle_sources |= 1 << source;
- ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- } else {
- if (pi->active_auto_throttle_sources & (1 << source)) {
- pi->active_auto_throttle_sources &= ~(1 << source);
- ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- }
-}
-
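ci_enable_auto_throttle_source() above tracks the active throttle sources in a bitmask and only calls ci_set_dpm_event_sources() when that mask actually changes, so the event-source registers are not rewritten on redundant enables or disables. A minimal host-side sketch of the same change-detection pattern, assuming made-up source IDs and a stand-in reprogram callback rather than the real driver symbols:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative source IDs; the driver uses AMDGPU_DPM_AUTO_THROTTLE_SRC_*. */
enum { SRC_THERMAL = 0, SRC_EXTERNAL = 1 };

static unsigned int active_sources;

/* Stand-in for ci_set_dpm_event_sources(). */
static void reprogram_event_sources(unsigned int mask)
{
	printf("reprogram with mask 0x%x\n", mask);
}

static void set_throttle_source(int source, bool enable)
{
	unsigned int bit = 1u << source;
	unsigned int next = enable ? (active_sources | bit)
				   : (active_sources & ~bit);

	if (next != active_sources) {	/* only touch the registers on a change */
		active_sources = next;
		reprogram_event_sources(active_sources);
	}
}

int main(void)
{
	set_throttle_source(SRC_THERMAL, true);		/* reprograms */
	set_throttle_source(SRC_THERMAL, true);		/* no-op */
	set_throttle_source(SRC_EXTERNAL, true);	/* reprograms */
	set_throttle_source(SRC_THERMAL, false);	/* reprograms */
	return 0;
}
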
-static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
-}
-
-static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if ((!pi->sclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if ((!pi->mclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- pi->need_update_smu7_dpm_table = 0;
- return 0;
-}
-
-static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (enable) {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
- ~MC_SEQ_CNTL_3__CAC_EN_MASK);
-
- WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
- WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
- WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
-
- udelay(10);
-
- WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
- WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
- WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
- }
- } else {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int ci_start_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret;
- u32 tmp;
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
- WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- ret = ci_enable_sclk_mclk_dpm(adev, true);
- if (ret)
- return ret;
-
- if (!pi->pcie_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if ((!pi->sclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if ((!pi->mclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_stop_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret;
- u32 tmp;
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- if (!pi->pcie_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- ret = ci_enable_sclk_mclk_dpm(adev, false);
- if (ret)
- return ret;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- if (enable)
- tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
- else
- tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-}
-
-#if 0
-static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
- bool ac_power)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- u32 power_limit;
-
- if (ac_power)
- power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
- else
- power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
-
- ci_set_power_limit(adev, power_limit);
-
- if (pi->caps_automatic_dc_transition) {
- if (ac_power)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
- else
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
- }
-
- return 0;
-}
-#endif
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
- return amdgpu_ci_send_msg_to_smc(adev, msg);
-}
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 *parameter)
-{
- PPSMC_Result smc_result;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
-
- if ((smc_result == PPSMC_Result_OK) && parameter)
- *parameter = RREG32(mmSMC_MSG_ARG_0);
-
- return smc_result;
-}
-
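The two wrappers above capture the SMC mailbox convention: a message argument is written to SMC_MSG_ARG_0 before the message is posted, and a reply is read back from the same register once the SMC reports PPSMC_Result_OK. The sketch below simulates that write-then-send / send-then-read ordering in plain C; the register variable and the smc_execute() firmware stub are fabricated for the example and do not correspond to real amdgpu calls.

#include <stdio.h>

enum smc_result { SMC_OK, SMC_FAIL };

/* Fake mailbox register standing in for mmSMC_MSG_ARG_0. */
static unsigned int smc_msg_arg_0;

/* Pretend firmware: answers one query by leaving a value in the argument register. */
static enum smc_result smc_execute(unsigned int msg)
{
	if (msg == 0x2a)
		smc_msg_arg_0 = 30000;	/* arbitrary reply value */
	return SMC_OK;
}

static enum smc_result send_msg_with_parameter(unsigned int msg, unsigned int param)
{
	smc_msg_arg_0 = param;		/* write the argument first */
	return smc_execute(msg);	/* then post the message */
}

static enum smc_result send_msg_return_parameter(unsigned int msg, unsigned int *out)
{
	enum smc_result res = smc_execute(msg);

	if (res == SMC_OK && out)
		*out = smc_msg_arg_0;	/* reply comes back in the same register */
	return res;
}

int main(void)
{
	unsigned int reply = 0;

	send_msg_with_parameter(0x10, 42);
	send_msg_return_parameter(0x2a, &reply);
	printf("reply = %u\n", reply);
	return 0;
}
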
-static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->sclk_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->mclk_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->pcie_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
- u32 target_tdp)
-{
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- return 0;
-}
-
-#if 0
-static int ci_set_boot_state(struct amdgpu_device *adev)
-{
- return ci_enable_sclk_mclk_dpm(adev, false);
-}
-#endif
-
-static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
-{
- u32 sclk_freq;
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_return_parameter(adev,
- PPSMC_MSG_API_GetSclkFrequency,
- &sclk_freq);
- if (smc_result != PPSMC_Result_OK)
- sclk_freq = 0;
-
- return sclk_freq;
-}
-
-static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
-{
- u32 mclk_freq;
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_return_parameter(adev,
- PPSMC_MSG_API_GetMclkFrequency,
- &mclk_freq);
- if (smc_result != PPSMC_Result_OK)
- mclk_freq = 0;
-
- return mclk_freq;
-}
-
-static void ci_dpm_start_smc(struct amdgpu_device *adev)
-{
- int i;
-
- amdgpu_ci_program_jump_on_start(adev);
- amdgpu_ci_start_smc_clock(adev);
- amdgpu_ci_start_smc(adev);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- break;
- }
-}
-
-static void ci_dpm_stop_smc(struct amdgpu_device *adev)
-{
- amdgpu_ci_reset_smc(adev);
- amdgpu_ci_stop_smc_clock(adev);
-}
-
-static int ci_process_firmware_header(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, DpmTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->dpm_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, SoftRegisters),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->soft_regs_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, mcRegisterTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->mc_reg_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, FanTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->fan_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->arb_table_start = tmp;
-
- return 0;
-}
-
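ci_process_firmware_header() above locates each SMC sub-table by reading a dword at SMU7_FIRMWARE_HEADER_LOCATION plus the offsetof() of the matching field in SMU7_Firmware_Header, so none of the table layout is hard-coded in the driver. Below is a self-contained sketch of that offsetof-driven lookup against a fake SRAM buffer; the header structure, the addresses, and the read helper are invented purely for illustration.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Invented stand-in for SMU7_Firmware_Header. */
struct fw_header {
	unsigned int version;
	unsigned int dpm_table;		/* offset of the DPM table in SMC RAM */
	unsigned int soft_registers;
	unsigned int fan_table;
};

/* Stand-in for amdgpu_ci_read_smc_sram_dword(): read 4 bytes of "SRAM". */
static unsigned int read_sram_dword(const unsigned char *sram, size_t addr)
{
	unsigned int v;

	memcpy(&v, sram + addr, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char sram[256] = { 0 };
	struct fw_header hdr = { 1, 0x80, 0xc0, 0xe0 };
	const size_t header_location = 0x20;	/* plays the role of SMU7_FIRMWARE_HEADER_LOCATION */

	memcpy(sram + header_location, &hdr, sizeof(hdr));

	printf("dpm table @ 0x%x, soft regs @ 0x%x\n",
	       read_sram_dword(sram, header_location + offsetof(struct fw_header, dpm_table)),
	       read_sram_dword(sram, header_location + offsetof(struct fw_header, soft_registers)));
	return 0;
}
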
-static void ci_read_clock_registers(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->clock_registers.cg_spll_func_cntl =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
- pi->clock_registers.cg_spll_func_cntl_2 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
- pi->clock_registers.cg_spll_func_cntl_3 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
- pi->clock_registers.cg_spll_func_cntl_4 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
- pi->clock_registers.cg_spll_spread_spectrum =
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
- pi->clock_registers.cg_spll_spread_spectrum_2 =
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
- pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
- pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
- pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
- pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
- pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
- pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
- pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
- pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
- pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
-}
-
-static void ci_init_sclk_t(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->low_sclk_interrupt_t = 0;
-}
-
-static void ci_enable_thermal_protection(struct amdgpu_device *adev,
- bool enable)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- if (enable)
- tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- else
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
-
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-#if 0
-static int ci_enter_ulp_state(struct amdgpu_device *adev)
-{
-
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
-
- udelay(25000);
-
- return 0;
-}
-
-static int ci_exit_ulp_state(struct amdgpu_device *adev)
-{
- int i;
-
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
-
- udelay(7000);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmSMC_RESP_0) == 1)
- break;
- udelay(1000);
- }
-
- return 0;
-}
-#endif
-
-static int ci_notify_smc_display_change(struct amdgpu_device *adev,
- bool has_display)
-{
- PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
-
- return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
-}
-
-static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (enable) {
- if (pi->caps_sclk_ds) {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
- return -EINVAL;
- } else {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
- return -EINVAL;
- }
- } else {
- if (pi->caps_sclk_ds) {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void ci_program_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
- u32 pre_vbi_time_in_us;
- u32 frame_time_in_us;
- u32 ref_clock = adev->clock.spll.reference_freq;
- u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
-
- tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
- if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
- else
- tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
-
- if (refresh_rate == 0)
- refresh_rate = 60;
- if (vblank_time == 0xffffffff)
- vblank_time = 500;
- frame_time_in_us = 1000000 / refresh_rate;
- pre_vbi_time_in_us =
- frame_time_in_us - 200 - vblank_time;
- tmp = pre_vbi_time_in_us * (ref_clock / 100);
-
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
-
- ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
-
-}
-
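ci_program_display_gap() derives the pre-vblank counter from three inputs: one frame lasts 1000000/refresh_rate microseconds, 200 us plus the vblank time are reserved at the end of the frame, and the remainder is converted into reference-clock ticks. The worked example below uses the fallback values from the code (60 Hz, 500 us) and assumes the reference clock is stored in 10 kHz units, in which case ref_clock / 100 is ticks per microsecond; the numbers are illustrative, not read from hardware.

#include <stdio.h>

int main(void)
{
	unsigned int refresh_rate = 60;		/* fallback when the refresh rate is unknown */
	unsigned int vblank_time = 500;		/* us, fallback when the vblank time is unknown */
	unsigned int ref_clock = 2500;		/* assumed 10 kHz units, i.e. 25 MHz */

	unsigned int frame_time_us = 1000000 / refresh_rate;			/* 16666 */
	unsigned int pre_vbi_time_us = frame_time_us - 200 - vblank_time;	/* 15966 */
	unsigned int gap_cntl2 = pre_vbi_time_us * (ref_clock / 100);		/* ticks before vblank */

	printf("frame=%u us, pre-vblank=%u us, counter=%u\n",
	       frame_time_us, pre_vbi_time_us, gap_cntl2);
	return 0;
}
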
-static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (enable) {
- if (pi->caps_sclk_ss_support) {
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
- } else {
- tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
- tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
-}
-
-static void ci_program_sstp(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
- ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
- (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
-}
-
-static void ci_enable_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
-
- tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
- CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
- tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
- (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
-
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
-}
-
-static void ci_program_vc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
-}
-
-static void ci_clear_vc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
-}
-
-static int ci_upload_firmware(struct amdgpu_device *adev)
-{
- int i, ret;
-
- if (amdgpu_ci_is_smc_running(adev)) {
- DRM_INFO("smc is running, no need to load smc firmware\n");
- return 0;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
- break;
- }
- WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
-
- amdgpu_ci_stop_smc_clock(adev);
- amdgpu_ci_reset_smc(adev);
-
- ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
-
- return ret;
-
-}
-
-static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
- struct atom_voltage_table *voltage_table)
-{
- u32 i;
-
- if (voltage_dependency_table == NULL)
- return -EINVAL;
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = voltage_dependency_table->count;
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-static int ci_construct_voltage_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->vddc_voltage_table);
- if (ret)
- return ret;
- } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &pi->vddc_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
- &pi->vddc_voltage_table);
-
- if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->vddci_voltage_table);
- if (ret)
- return ret;
- } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &pi->vddci_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
- &pi->vddci_voltage_table);
-
- if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->mvdd_voltage_table);
- if (ret)
- return ret;
- } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- &pi->mvdd_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
- &pi->mvdd_voltage_table);
-
- return 0;
-}
-
-static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- SMU7_Discrete_VoltageLevel *smc_voltage_table)
-{
- int ret;
-
- ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
- &smc_voltage_table->StdVoltageHiSidd,
- &smc_voltage_table->StdVoltageLoSidd);
-
- if (ret) {
- smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
- smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
- }
-
- smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
- smc_voltage_table->StdVoltageHiSidd =
- cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
- smc_voltage_table->StdVoltageLoSidd =
- cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
-}
-
-static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- unsigned int count;
-
- table->VddcLevelCount = pi->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->vddc_voltage_table.entries[count],
- &table->VddcLevel[count]);
-
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->VddcLevel[count].Smio |=
- pi->vddc_voltage_table.entries[count].smio_low;
- else
- table->VddcLevel[count].Smio = 0;
- }
- table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- table->VddciLevelCount = pi->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->vddci_voltage_table.entries[count],
- &table->VddciLevel[count]);
-
- if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->VddciLevel[count].Smio |=
- pi->vddci_voltage_table.entries[count].smio_low;
- else
- table->VddciLevel[count].Smio = 0;
- }
- table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- unsigned int count;
-
- table->MvddLevelCount = pi->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->mvdd_voltage_table.entries[count],
- &table->MvddLevel[count]);
-
- if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->MvddLevel[count].Smio |=
- pi->mvdd_voltage_table.entries[count].smio_low;
- else
- table->MvddLevel[count].Smio = 0;
- }
- table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- int ret;
-
- ret = ci_populate_smc_vddc_table(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_vddci_table(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_mvdd_table(adev, table);
- if (ret)
- return ret;
-
- return 0;
-}
-
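Every populate helper above byte-swaps its results with cpu_to_be16()/cpu_to_be32() before upload because the SMU7 firmware reads its tables in big-endian layout. The sketch below shows the same "fill in native order, swap each field exactly once at the end" pattern on a little-endian host; the structure is a simplified stand-in for SMU7_Discrete_VoltageLevel and the plain shifts replace the kernel's cpu_to_be16().

#include <stdio.h>

struct voltage_level {		/* simplified stand-in, not the real SMU7 struct */
	unsigned short voltage;
	unsigned short std_hi;
	unsigned short std_lo;
};

static unsigned short to_be16(unsigned short v)
{
	return (unsigned short)((v >> 8) | (v << 8));	/* assumes a little-endian host */
}

int main(void)
{
	struct voltage_level lvl;

	/* Work in CPU byte order first ... */
	lvl.voltage = 1100 * 4;	/* millivolts times a VOLTAGE_SCALE-style factor */
	lvl.std_hi = lvl.voltage;
	lvl.std_lo = lvl.voltage;

	/* ... then swap every field exactly once before handing the table
	 * to the (big-endian) firmware. */
	lvl.voltage = to_be16(lvl.voltage);
	lvl.std_hi = to_be16(lvl.std_hi);
	lvl.std_lo = to_be16(lvl.std_lo);

	printf("on-wire voltage word: 0x%04x\n", (unsigned int)lvl.voltage);
	return 0;
}
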
-static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
- SMU7_Discrete_VoltageLevel *voltage)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i = 0;
-
- if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
- if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
- voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
- return -EINVAL;
- }
-
- return -EINVAL;
-}
-
-static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
-{
- u16 v_index, idx;
- bool voltage_found = false;
- *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
- *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
-
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
- return -EINVAL;
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (voltage_table->value ==
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- idx = v_index;
- else
- idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
- *std_voltage_lo_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
- *std_voltage_hi_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
- break;
- }
- }
-
- if (!voltage_found) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (voltage_table->value <=
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- idx = v_index;
- else
- idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
- *std_voltage_lo_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
- *std_voltage_hi_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
- break;
- }
- }
- }
- }
-
- return 0;
-}
-
-static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u32 sclk,
- u32 *phase_shedding)
-{
- unsigned int i;
-
- *phase_shedding = 1;
-
- for (i = 0; i < limits->count; i++) {
- if (sclk < limits->entries[i].sclk) {
- *phase_shedding = i;
- break;
- }
- }
-}
-
-static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u32 mclk,
- u32 *phase_shedding)
-{
- unsigned int i;
-
- *phase_shedding = 1;
-
- for (i = 0; i < limits->count; i++) {
- if (mclk < limits->entries[i].mclk) {
- *phase_shedding = i;
- break;
- }
- }
-}
-
-static int ci_init_arb_table_index(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- tmp &= 0x00FFFFFF;
- tmp |= MC_CG_ARB_FREQ_F1 << 24;
-
- return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
- tmp, pi->sram_end);
-}
-
-static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
- u32 clock, u32 *voltage)
-{
- u32 i = 0;
-
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *voltage = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
- }
-
- *voltage = allowed_clock_voltage_table->entries[i-1].v;
-
- return 0;
-}
-
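ci_get_dependency_volt_by_clk() above walks a clock/voltage dependency table sorted by ascending clock and picks the voltage of the first entry whose clock covers the request, clamping to the last (highest) entry when the request is above the table. A standalone sketch of that lookup with made-up table entries:

#include <stdio.h>

struct clk_volt { unsigned int clk; unsigned int v; };

/* Returns 0 and fills *v on success, -1 for an empty table. */
static int volt_for_clock(const struct clk_volt *tbl, unsigned int count,
			  unsigned int clock, unsigned int *v)
{
	unsigned int i;

	if (count == 0)
		return -1;

	for (i = 0; i < count; i++) {
		if (tbl[i].clk >= clock) {	/* first level that covers the request */
			*v = tbl[i].v;
			return 0;
		}
	}

	*v = tbl[count - 1].v;			/* clamp to the highest level */
	return 0;
}

int main(void)
{
	/* Illustrative entries, ascending clock order. */
	const struct clk_volt tbl[] = {
		{ 30000, 900 }, { 60000, 1000 }, { 90000, 1100 },
	};
	unsigned int v = 0;

	volt_for_clock(tbl, 3, 65000, &v);
	printf("65000 -> %u\n", v);	/* 1100 */
	volt_for_clock(tbl, 3, 120000, &v);
	printf("120000 -> %u\n", v);	/* clamped to 1100 */
	return 0;
}
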
-static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
-{
- u32 i;
- u32 tmp;
- u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
-
- if (sclk < min)
- return 0;
-
- for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- tmp = sclk >> i;
- if (tmp >= min || i == 0)
- break;
- }
-
- return (u8)i;
-}
-
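ci_get_sleep_divider_id_from_clock() above picks the largest power-of-two divider ID such that sclk shifted right by that ID still stays at or above the minimum engine clock. The sketch below mirrors that search; the limit macros are illustrative values standing in for CISLAND_MINIMUM_ENGINE_CLOCK and CISLAND_MAX_DEEPSLEEP_DIVIDER_ID.

#include <stdio.h>

#define MIN_ENGINE_CLOCK	5000	/* illustrative floor */
#define MAX_DIVIDER_ID		5	/* illustrative deepest divider */

static unsigned char sleep_divider_id(unsigned int sclk, unsigned int min_sclk_in_sr)
{
	unsigned int min = min_sclk_in_sr > MIN_ENGINE_CLOCK ?
			   min_sclk_in_sr : MIN_ENGINE_CLOCK;
	unsigned int i;

	if (sclk < min)
		return 0;

	/* Largest right shift that keeps the clock at or above the minimum. */
	for (i = MAX_DIVIDER_ID; ; i--) {
		if ((sclk >> i) >= min || i == 0)
			break;
	}
	return (unsigned char)i;
}

int main(void)
{
	printf("divider for 80000: %u\n", sleep_divider_id(80000, 0));	/* 4 */
	printf("divider for  6000: %u\n", sleep_divider_id(6000, 0));	/* 0 */
	return 0;
}
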
-static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
-{
- return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int ci_reset_to_default(struct amdgpu_device *adev)
-{
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
- const u32 engine_clock,
- const u32 memory_clock,
- u32 *dram_timimg2)
-{
- bool patch;
- u32 tmp, tmp2;
-
- tmp = RREG32(mmMC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
-
- if (patch &&
- ((adev->pdev->device == 0x67B0) ||
- (adev->pdev->device == 0x67B1))) {
- if ((memory_clock > 100000) && (memory_clock <= 125000)) {
- tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
- *dram_timimg2 &= ~0x00ff0000;
- *dram_timimg2 |= tmp2 << 16;
- } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
- tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
- *dram_timimg2 &= ~0x00ff0000;
- *dram_timimg2 |= tmp2 << 16;
- }
- }
-}
-
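On the affected boards, ci_register_patching_mc_arb() above recomputes a single byte of the DRAM timing word and splices it into bits 23:16 while leaving the rest of the value untouched. A standalone sketch of that clear-then-insert bitfield update, reusing the coefficients of the first branch shown above purely as example numbers:

#include <stdio.h>

/* Splice a recomputed 8-bit field into bits 23:16 of a timing word. */
static unsigned int patch_timing2(unsigned int dram_timing2, unsigned int engine_clock)
{
	unsigned int field = (((0x31 * engine_clock) / 125000) - 1) & 0xff;

	dram_timing2 &= ~0x00ff0000u;	/* clear bits 23:16 */
	dram_timing2 |= field << 16;	/* insert the new byte */
	return dram_timing2;
}

int main(void)
{
	unsigned int before = 0x12345678;
	unsigned int after = patch_timing2(before, 100000);

	printf("0x%08x -> 0x%08x\n", before, after);
	return 0;
}
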
-static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
- u32 sclk,
- u32 mclk,
- SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- u32 dram_timing;
- u32 dram_timing2;
- u32 burst_time;
-
- amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
-
- dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
- dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
- burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
-
- ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
-
- arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
- arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
- arb_regs->McArbBurstTime = (u8)burst_time;
-
- return 0;
-}
-
-static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_MCArbDramTimingTable arb_regs;
- u32 i, j;
- int ret = 0;
-
- memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
- ret = ci_populate_memory_timing_parameters(adev,
- pi->dpm_table.sclk_table.dpm_levels[i].value,
- pi->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (ret)
- break;
- }
- }
-
- if (ret == 0)
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->arb_table_start,
- (u8 *)&arb_regs,
- sizeof(SMU7_Discrete_MCArbDramTimingTable),
- pi->sram_end);
-
- return ret;
-}
-
-static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->need_update_smu7_dpm_table == 0)
- return 0;
-
- return ci_do_program_memory_timing_parameters(adev);
-}
-
-static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_boot_state)
-{
- struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 level = 0;
-
- for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
- boot_state->performance_levels[0].sclk) {
- pi->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
- boot_state->performance_levels[0].mclk) {
- pi->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-}
-
-static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
-{
- u32 i;
- u32 mask_value = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask_value = mask_value << 1;
- if (dpm_table->dpm_levels[i-1].enabled)
- mask_value |= 0x1;
- else
- mask_value &= 0xFFFFFFFE;
- }
-
- return mask_value;
-}
-
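ci_get_dpm_level_enable_mask_value() above collapses the per-level enabled flags of a DPM table into a bitmask with bit N set when level N is enabled, walking the levels from the top down so each shift lands the next flag on its own bit. An equivalent host-side sketch:

#include <stdio.h>
#include <stdbool.h>

static unsigned int enable_mask(const bool *enabled, unsigned int count)
{
	unsigned int mask = 0;
	unsigned int i;

	/* Highest level first: after count iterations, bit N == level N. */
	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (enabled[i - 1])
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	const bool levels[] = { true, true, false, true };	/* levels 0..3 */

	printf("mask = 0x%x\n", enable_mask(levels, 4));	/* 0xb */
	return 0;
}
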
-static void ci_populate_smc_link_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 i;
-
- for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].DownT = cpu_to_be32(5);
- table->LinkLevel[i].UpT = cpu_to_be32(30);
- }
-
- pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-}
-
-static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->UvdLevelCount =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
- table->UvdLevel[count].DclkFrequency =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
- table->UvdLevel[count].MinVddc =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->UvdLevel[count].MinVddcPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->UvdLevel[count].VclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->UvdLevel[count].DclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
-
- table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
- table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
- table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
- }
-
- return ret;
-}
-
-static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->VceLevelCount =
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
- table->VceLevel[count].MinVoltage =
- (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->VceLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->VceLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->VceLevel[count].Divider = (u8)dividers.post_divider;
-
- table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
- table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
- }
-
- return ret;
-
-}
-
-static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->AcpLevelCount = (u8)
- (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
- table->AcpLevel[count].MinVoltage =
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
- table->AcpLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->AcpLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->AcpLevel[count].Divider = (u8)dividers.post_divider;
-
- table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
- table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
- }
-
- return ret;
-}
-
-static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->SamuLevelCount =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- table->SamuLevel[count].Frequency =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
- table->SamuLevel[count].MinVoltage =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->SamuLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->SamuLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->SamuLevel[count].Divider = (u8)dividers.post_divider;
-
- table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
- table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
- }
-
- return ret;
-}
-
-static int ci_calculate_mclk_params(struct amdgpu_device *adev,
- u32 memory_clock,
- SMU7_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dll_state_on)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 dll_cntl = pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
- u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
- u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
- struct atom_mpll_param mpll_param;
- int ret;
-
- ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
- if (ret)
- return ret;
-
- mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
- mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
-
- mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
- MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
- mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
- (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
- (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
-
- mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
- mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
- MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
- mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
- (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
- }
-
- if (pi->caps_mclk_ss_support) {
- struct amdgpu_atom_ss ss;
- u32 freq_nom;
- u32 tmp;
- u32 reference_clock = adev->clock.mpll.reference_freq;
-
- if (mpll_param.qdr == 1)
- freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
-
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
- u32 clks = reference_clock * 5 / ss.rate;
- u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
- mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
-
- mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
- mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
- }
- }
-
- mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
- mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
-
- if (dll_state_on)
- mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
- else
- mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
-
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
-static int ci_populate_single_memory_level(struct amdgpu_device *adev,
- u32 memory_clock,
- SMU7_Discrete_MemoryLevel *memory_level)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
- bool dll_state_on;
-
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- memory_clock, &memory_level->MinVddc);
- if (ret)
- return ret;
- }
-
- if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- memory_clock, &memory_level->MinVddci);
- if (ret)
- return ret;
- }
-
- if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- memory_clock, &memory_level->MinMvdd);
- if (ret)
- return ret;
- }
-
- memory_level->MinVddcPhases = 1;
-
- if (pi->vddc_phase_shed_control)
- ci_populate_phase_value_based_on_mclk(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- memory_clock,
- &memory_level->MinVddcPhases);
-
- memory_level->EnabledForActivity = 1;
- memory_level->EnabledForThrottle = 1;
- memory_level->UpH = 0;
- memory_level->DownH = 100;
- memory_level->VoltageDownH = 0;
- memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
-
- memory_level->StutterEnable = false;
- memory_level->StrobeEnable = false;
- memory_level->EdcReadEnable = false;
- memory_level->EdcWriteEnable = false;
- memory_level->RttEnable = false;
-
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (pi->mclk_stutter_mode_threshold &&
- (memory_clock <= pi->mclk_stutter_mode_threshold) &&
- (!pi->uvd_enabled) &&
- (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
- (adev->pm.dpm.new_active_crtc_count <= 2))
- memory_level->StutterEnable = true;
-
- if (pi->mclk_strobe_mode_threshold &&
- (memory_clock <= pi->mclk_strobe_mode_threshold))
- memory_level->StrobeEnable = 1;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- memory_level->StrobeRatio =
- ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
- if (pi->mclk_edc_enable_threshold &&
- (memory_clock > pi->mclk_edc_enable_threshold))
- memory_level->EdcReadEnable = true;
-
- if (pi->mclk_edc_wr_enable_threshold &&
- (memory_clock > pi->mclk_edc_wr_enable_threshold))
- memory_level->EdcWriteEnable = true;
-
- if (memory_level->StrobeEnable) {
- if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
- ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
- dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- else
- dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
- } else {
- dll_state_on = pi->dll_default_on;
- }
- } else {
- memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
- dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- }
-
- ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
- if (ret)
- return ret;
-
- memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
- memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
- memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
- memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
-
- memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
- memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
- memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
- memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
- memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
- memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
- memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
- memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
- memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
- memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
- memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
-
- return 0;
-}
-
-static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_clock_dividers dividers;
- SMU7_Discrete_VoltageLevel voltage_level;
- u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
- u32 dll_cntl = pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
- int ret;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (pi->acpi_vddc)
- table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
- else
- table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
-
- table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
-
- table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
- table->ACPILevel.SclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->ACPILevel.SclkDid = (u8)dividers.post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
- spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
-
- spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
- table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
- table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
- table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
- table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
- table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
- table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
- table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
- table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
- table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
- table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
- table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
- table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
- table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
-
- table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
- table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
-
- if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- if (pi->acpi_vddci)
- table->MemoryACPILevel.MinVddci =
- cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinVddci =
- cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
- }
-
- if (ci_populate_mvdd_value(adev, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd = 0;
- else
- table->MemoryACPILevel.MinMvdd =
- cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
-
- mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
- mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
-
- dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
-
- table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
- table->MemoryACPILevel.MpllDqFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
- table->MemoryACPILevel.MpllFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
- table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
- table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpH = 0;
- table->MemoryACPILevel.DownH = 100;
- table->MemoryACPILevel.VoltageDownH = 0;
- table->MemoryACPILevel.ActivityLevel =
- cpu_to_be16((u16)pi->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- table->MemoryACPILevel.StrobeEnable = false;
- table->MemoryACPILevel.EdcReadEnable = false;
- table->MemoryACPILevel.EdcWriteEnable = false;
- table->MemoryACPILevel.RttEnable = false;
-
- return 0;
-}
-
-
-static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ulv_parm *ulv = &pi->ulv;
-
- if (ulv->supported) {
- if (enable)
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- else
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_populate_ulv_level(struct amdgpu_device *adev,
- SMU7_Discrete_Ulv *state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- if (ulv_voltage == 0) {
- pi->ulv.supported = false;
- return 0;
- }
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
- state->VddcOffset = 0;
- else
- state->VddcOffset =
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
- } else {
- if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
- state->VddcOffsetVid = 0;
- else
- state->VddcOffsetVid = (u8)
- ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
- }
- state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
-
- state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
- state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
- state->VddcOffset = cpu_to_be16(state->VddcOffset);
-
- return 0;
-}
-
-static int ci_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SMU7_Discrete_GraphicsLevel *sclk)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_clock_dividers dividers;
- u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
- u32 reference_clock = adev->clock.spll.reference_freq;
- u32 reference_divider;
- u32 fbdiv;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
- engine_clock, false, &dividers);
- if (ret)
- return ret;
-
- reference_divider = 1 + dividers.ref_div;
- fbdiv = dividers.fb_div & 0x3FFFFFF;
-
- spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
- spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
-
- if (pi->caps_sclk_ss_support) {
- struct amdgpu_atom_ss ss;
- u32 vco_freq = engine_clock * dividers.post_div;
-
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
- u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
- u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
- cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
- cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
-
- cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
- cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (u8)dividers.post_divider;
-
- return 0;
-}
-
-static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
- u32 engine_clock,
- u16 sclk_activity_level_t,
- SMU7_Discrete_GraphicsLevel *graphic_level)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
- if (ret)
- return ret;
-
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- engine_clock, &graphic_level->MinVddc);
- if (ret)
- return ret;
-
- graphic_level->SclkFrequency = engine_clock;
-
- graphic_level->Flags = 0;
- graphic_level->MinVddcPhases = 1;
-
- if (pi->vddc_phase_shed_control)
- ci_populate_phase_value_based_on_sclk(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
-
- graphic_level->ActivityLevel = sclk_activity_level_t;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpH = 0;
- graphic_level->DownH = 0;
- graphic_level->VoltageDownH = 0;
- graphic_level->PowerThrottle = 0;
-
- if (pi->caps_sclk_ds)
- graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
- CISLAND_MINIMUM_ENGINE_CLOCK);
-
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
- graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
- graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
- graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
- graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
- graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
- graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
- graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
- graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
- graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
- graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
-
- return 0;
-}
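
One detail worth calling out in ci_populate_single_graphic_level(): every field is byte-swapped with cpu_to_be32()/cpu_to_be16() before the table is copied out, since the SMU7 firmware consumes its tables in big-endian order. A minimal userspace sketch of that swap, using glibc's htobe32() as a stand-in and an arbitrary example value:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t sclk = 30000;	/* arbitrary example, not a real clock setting */

	printf("host order: 0x%08x, as stored for the SMC: 0x%08x\n",
	       (unsigned int)sclk, (unsigned int)htobe32(sclk));
	return 0;
}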
-
-static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 level_array_address = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
- u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
- SMU7_MAX_LEVELS_GRAPHICS;
- SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
- u32 i, ret;
-
- memset(levels, 0, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- ret = ci_populate_single_graphic_level(adev,
- dpm_table->sclk_table.dpm_levels[i].value,
- (u16)pi->activity_target[i],
- &pi->smc_state_table.GraphicsLevel[i]);
- if (ret)
- return ret;
- if (i > 1)
- pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- if (i == (dpm_table->sclk_table.count - 1))
- pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
- }
- pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
- (u8 *)levels, level_array_size,
- pi->sram_end);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ci_populate_ulv_state(struct amdgpu_device *adev,
- SMU7_Discrete_Ulv *ulv_level)
-{
- return ci_populate_ulv_level(adev, ulv_level);
-}
-
-static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 level_array_address = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
- u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
- SMU7_MAX_LEVELS_MEMORY;
- SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
- u32 i, ret;
-
- memset(levels, 0, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- if (dpm_table->mclk_table.dpm_levels[i].value == 0)
- return -EINVAL;
- ret = ci_populate_single_memory_level(adev,
- dpm_table->mclk_table.dpm_levels[i].value,
- &pi->smc_state_table.MemoryLevel[i]);
- if (ret)
- return ret;
- }
-
- if ((dpm_table->mclk_table.count >= 2) &&
- ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
- pi->smc_state_table.MemoryLevel[1].MinVddc =
- pi->smc_state_table.MemoryLevel[0].MinVddc;
- pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
- pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
- }
-
- pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
-
- pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
- (u8 *)levels, level_array_size,
- pi->sram_end);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
- struct ci_single_dpm_table* dpm_table,
- u32 count)
-{
- u32 i;
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
- dpm_table->dpm_levels[i].enabled = false;
-}
-
-static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
- u32 index, u32 pcie_gen, u32 pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
- return -EINVAL;
-
- if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
- pi->pcie_gen_powersaving = pi->pcie_gen_performance;
- pi->pcie_lane_powersaving = pi->pcie_lane_performance;
- } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
- pi->pcie_gen_performance = pi->pcie_gen_powersaving;
- pi->pcie_lane_performance = pi->pcie_lane_powersaving;
- }
-
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.pcie_speed_table,
- SMU7_MAX_LEVELS_LINK);
-
- if (adev->asic_type == CHIP_BONAIRE)
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.max);
- else
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.min);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
- pi->pcie_gen_performance.min,
- pi->pcie_lane_performance.min);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
- pi->pcie_gen_performance.min,
- pi->pcie_lane_performance.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
- pi->pcie_gen_powersaving.max,
- pi->pcie_lane_powersaving.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
- pi->pcie_gen_performance.max,
- pi->pcie_lane_performance.max);
-
- pi->dpm_table.pcie_speed_table.count = 6;
-
- return 0;
-}
-
-static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
- struct amdgpu_cac_leakage_table *std_voltage_table =
- &adev->pm.dpm.dyn_state.cac_leakage_table;
- u32 i;
-
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_sclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_table == NULL)
- return -EINVAL;
- if (allowed_mclk_table->count < 1)
- return -EINVAL;
-
- memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
-
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.sclk_table,
- SMU7_MAX_LEVELS_GRAPHICS);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.mclk_table,
- SMU7_MAX_LEVELS_MEMORY);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.vddc_table,
- SMU7_MAX_LEVELS_VDDC);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.vddci_table,
- SMU7_MAX_LEVELS_VDDCI);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.mvdd_table,
- SMU7_MAX_LEVELS_MVDD);
-
- pi->dpm_table.sclk_table.count = 0;
- for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
- if ((i == 0) ||
- (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
- allowed_sclk_vddc_table->entries[i].clk)) {
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
- allowed_sclk_vddc_table->entries[i].clk;
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
- pi->dpm_table.sclk_table.count++;
- }
- }
-
- pi->dpm_table.mclk_table.count = 0;
- for (i = 0; i < allowed_mclk_table->count; i++) {
- if ((i == 0) ||
- (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
- allowed_mclk_table->entries[i].clk)) {
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
- allowed_mclk_table->entries[i].clk;
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
- pi->dpm_table.mclk_table.count++;
- }
- }
-
- for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
- pi->dpm_table.vddc_table.dpm_levels[i].value =
- allowed_sclk_vddc_table->entries[i].v;
- pi->dpm_table.vddc_table.dpm_levels[i].param1 =
- std_voltage_table->entries[i].leakage;
- pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
-
- allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.vddci_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
- }
-
- allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.mvdd_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
- }
-
- ci_setup_default_pcie_tables(adev);
-
- /* save a copy of the default DPM table */
- memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
- sizeof(struct ci_dpm_table));
-
- return 0;
-}
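
The sclk/mclk table construction above de-duplicates consecutive identical clocks from the dependency tables and only marks the first level enabled. A small self-contained sketch of that collapse, with an invented input array:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
	/* hypothetical dependency-table clocks with duplicates */
	uint32_t dep_clk[] = { 30000, 30000, 60000, 80000, 80000, 100000 };
	uint32_t levels[8];
	bool enabled[8];
	unsigned int count = 0, i;

	for (i = 0; i < sizeof(dep_clk) / sizeof(dep_clk[0]); i++) {
		if (i == 0 || levels[count - 1] != dep_clk[i]) {
			levels[count] = dep_clk[i];
			enabled[count] = (i == 0);	/* only the first level starts enabled */
			count++;
		}
	}

	for (i = 0; i < count; i++)
		printf("level %u: clk=%u enabled=%d\n", i, levels[i], enabled[i]);
	return 0;
}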
-
-static int ci_find_boot_level(struct ci_single_dpm_table *table,
- u32 value, u32 *boot_level)
-{
- u32 i;
- int ret = -EINVAL;
-
- for(i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- ret = 0;
- }
- }
-
- return ret;
-}
-
-static int ci_init_smc_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ulv_parm *ulv = &pi->ulv;
- struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
- SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
- int ret;
-
- ret = ci_setup_default_dpm_tables(adev);
- if (ret)
- return ret;
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
- ci_populate_smc_voltage_tables(adev, table);
-
- ci_init_fps_limits(adev);
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->supported) {
- ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
- if (ret)
- return ret;
- WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
- }
-
- ret = ci_populate_all_graphic_levels(adev);
- if (ret)
- return ret;
-
- ret = ci_populate_all_memory_levels(adev);
- if (ret)
- return ret;
-
- ci_populate_smc_link_level(adev, table);
-
- ret = ci_populate_smc_acpi_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_vce_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_acp_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_samu_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_do_program_memory_timing_parameters(adev);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_uvd_level(adev, table);
- if (ret)
- return ret;
-
- table->UvdBootLevel = 0;
- table->VceBootLevel = 0;
- table->AcpBootLevel = 0;
- table->SamuBootLevel = 0;
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
- pi->vbios_boot_state.sclk_bootup_value,
- (u32 *)&pi->smc_state_table.GraphicsBootLevel);
-
- ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
- pi->vbios_boot_state.mclk_bootup_value,
- (u32 *)&pi->smc_state_table.MemoryBootLevel);
-
- table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
- table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
- table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
-
- ci_populate_smc_initial_state(adev, amdgpu_boot_state);
-
- ret = ci_populate_bapm_parameters_in_dpm_table(adev);
- if (ret)
- return ret;
-
- table->UVDInterval = 1;
- table->VCEInterval = 1;
- table->ACPInterval = 1;
- table->SAMUInterval = 1;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
- CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
- table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
- CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->VddcVddciDelta = 4000;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
- table->PCIeGenInterval = 1;
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
- table->SVI2Enable = 1;
- else
- table->SVI2Enable = 0;
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- table->SystemFlags = cpu_to_be32(table->SystemFlags);
- table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
- table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
- table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
- table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
- table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
- table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
- table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
- table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
- table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
- table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
- table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
- table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
- table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, SystemFlags),
- (u8 *)&table->SystemFlags,
- sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
- pi->sram_end);
- if (ret)
- return ret;
-
- return 0;
-}
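
TemperatureLimitHigh/Low above are converted into the SMC's fixed-point format before being byte-swapped. As a worked example, and assuming the conversion unit is 256 (Q8.8) and the driver-side limits are in millidegrees Celsius (both assumptions made for illustration), the arithmetic looks like this:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t q88_unit = 256;	/* assumed value of CISLANDS_Q88_FORMAT_CONVERSION_UNIT */
	uint32_t temp_high = 120000;	/* hypothetical high limit, millidegrees C */
	uint32_t temp_low = 90000;	/* hypothetical low limit, millidegrees C */

	uint16_t limit_high = (uint16_t)((temp_high * q88_unit) / 1000);
	uint16_t limit_low = (uint16_t)((temp_low * q88_unit) / 1000);

	printf("high = %u (0x%04x), low = %u (0x%04x)\n",
	       limit_high, limit_high, limit_low, limit_low);
	return 0;
}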
-
-static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
- struct ci_single_dpm_table *dpm_table,
- u32 low_limit, u32 high_limit)
-{
- u32 i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit) ||
- (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
-}
-
-static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
- u32 speed_low, u32 lanes_low,
- u32 speed_high, u32 lanes_high)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
- u32 i, j;
-
- for (i = 0; i < pcie_table->count; i++) {
- if ((pcie_table->dpm_levels[i].value < speed_low) ||
- (pcie_table->dpm_levels[i].param1 < lanes_low) ||
- (pcie_table->dpm_levels[i].value > speed_high) ||
- (pcie_table->dpm_levels[i].param1 > lanes_high))
- pcie_table->dpm_levels[i].enabled = false;
- else
- pcie_table->dpm_levels[i].enabled = true;
- }
-
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_table->dpm_levels[i].enabled) {
- for (j = i + 1; j < pcie_table->count; j++) {
- if (pcie_table->dpm_levels[j].enabled) {
- if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
- (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
- pcie_table->dpm_levels[j].enabled = false;
- }
- }
- }
- }
-}
-
-static int ci_trim_dpm_states(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 high_limit_count;
-
- if (state->performance_level_count < 1)
- return -EINVAL;
-
- if (state->performance_level_count == 1)
- high_limit_count = 0;
- else
- high_limit_count = 1;
-
- ci_trim_single_dpm_states(adev,
- &pi->dpm_table.sclk_table,
- state->performance_levels[0].sclk,
- state->performance_levels[high_limit_count].sclk);
-
- ci_trim_single_dpm_states(adev,
- &pi->dpm_table.mclk_table,
- state->performance_levels[0].mclk,
- state->performance_levels[high_limit_count].mclk);
-
- ci_trim_pcie_dpm_states(adev,
- state->performance_levels[0].pcie_gen,
- state->performance_levels[0].pcie_lane,
- state->performance_levels[high_limit_count].pcie_gen,
- state->performance_levels[high_limit_count].pcie_lane);
-
- return 0;
-}
-
-static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
-{
- struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
- struct amdgpu_clock_voltage_dependency_table *vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 requested_voltage = 0;
- u32 i;
-
- if (disp_voltage_table == NULL)
- return -EINVAL;
- if (!disp_voltage_table->count)
- return -EINVAL;
-
- for (i = 0; i < disp_voltage_table->count; i++) {
- if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
- requested_voltage = disp_voltage_table->entries[i].v;
- }
-
- for (i = 0; i < vddc_table->count; i++) {
- if (requested_voltage <= vddc_table->entries[i].v) {
- requested_voltage = vddc_table->entries[i].v;
- return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VddC_Request,
- requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
- }
-
- return -EINVAL;
-}
-
-static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result result;
-
- ci_apply_disp_minimum_voltage_request(adev);
-
- if (!pi->sclk_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
-#if 0
- if (!pi->pcie_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_PCIeDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-#endif
-
- return 0;
-}
-
-static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
- u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
- struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
- u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
- u32 i;
-
- pi->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count) {
- pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- } else {
- /* XXX check display min clock requirements */
- if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
- pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- if (adev->pm.dpm.current_active_crtc_count !=
- adev->pm.dpm.new_active_crtc_count)
- pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-}
-
-static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
- u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- int ret;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
- dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
-
- if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
- dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
-
- if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
- ret = ci_populate_all_graphic_levels(adev);
- if (ret)
- return ret;
- }
-
- if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
- ret = ci_populate_all_memory_levels(adev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
-
- for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_uvd_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
-
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
- pi->uvd_enabled = true;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
- } else {
- if (pi->uvd_enabled) {
- pi->uvd_enabled = false;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_vce_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.vce_dpm_enable_mask);
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-#if 0
-static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_samu_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.samu_dpm_enable_mask);
- }
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_acp_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.acp_dpm_enable_mask);
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-#endif
-
-static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret = 0;
-
- if (!gate) {
- /* turn the clocks on when decoding */
- if (pi->caps_uvd_dpm ||
- (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
- pi->smc_state_table.UvdBootLevel = 0;
- else
- pi->smc_state_table.UvdBootLevel =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
-
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
- tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
- ret = ci_enable_uvd_dpm(adev, true);
- } else {
- ret = ci_enable_uvd_dpm(adev, false);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
-{
- u8 i;
- u32 min_evclk = 30000; /* ??? */
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= min_evclk)
- return i;
- }
-
- return table->count - 1;
-}
-
-static int ci_update_vce_dpm(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = 0;
- u32 tmp;
-
- if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
- if (amdgpu_new_state->evclk) {
- pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
- tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
-
- ret = ci_enable_vce_dpm(adev, true);
- } else {
- ret = ci_enable_vce_dpm(adev, false);
- if (ret)
- return ret;
- }
- }
- return ret;
-}
-
-#if 0
-static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
-{
- return ci_enable_samu_dpm(adev, gate);
-}
-
-static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (!gate) {
- pi->smc_state_table.AcpBootLevel = 0;
-
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~AcpBootLevel_MASK;
- tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
- }
-
- return ci_enable_acp_dpm(adev, !gate);
-}
-#endif
-
-static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- ret = ci_trim_dpm_states(adev, amdgpu_state);
- if (ret)
- return ret;
-
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
- pi->last_mclk_dpm_enable_mask =
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
- if (pi->uvd_enabled) {
- if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- }
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
- u32 level_mask)
-{
- u32 level = 0;
-
- while ((level_mask & (1 << level)) == 0)
- level++;
-
- return level;
-}
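
ci_get_lowest_enabled_level() and the forced-high path in ci_dpm_force_performance_level() walk the enable masks in opposite directions: one finds the lowest set bit, the other counts right-shifts to reach the highest. A tiny sketch of both walks with an arbitrary example mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 0x3c;	/* arbitrary example: levels 2..5 enabled */
	uint32_t lowest = 0, highest = 0, tmp = mask;

	while ((mask & (1u << lowest)) == 0)	/* as in ci_get_lowest_enabled_level() */
		lowest++;

	while (tmp >>= 1)			/* as in the FORCED_LEVEL_HIGH path */
		highest++;

	printf("lowest enabled level = %u, highest enabled level = %u\n",
	       lowest, highest);
	return 0;
}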
-
-
-static int ci_dpm_force_performance_level(void *handle,
- enum amd_dpm_forced_level level)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp, levels, i;
- int ret;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_pcie(adev, level);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_sclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_mclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
- ret = ci_dpm_force_state_sclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- ret = ci_dpm_force_state_mclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
- ret = ci_dpm_force_state_pcie(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- if (!pi->pcie_dpm_key_disabled) {
- PPSMC_Result smc_result;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- ret = ci_upload_dpm_level_enable_mask(adev);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-static int ci_set_mc_special_registers(struct amdgpu_device *adev,
- struct ci_mc_reg_table *table)
-{
- u8 i, j, k;
- u32 temp_reg;
-
- for (i = 0, j = table->last; i < table->last; i++) {
- if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- switch(table->mc_reg_address[i].s1) {
- case mmMC_SEQ_MISC1:
- temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
-
- if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- temp_reg = RREG32(mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
-
- if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
- if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- }
- j++;
- }
- break;
- case mmMC_SEQ_RESERVE_M:
- temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- break;
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
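
The mmMC_SEQ_MISC1 case in ci_set_mc_special_registers() folds the two 16-bit halves of the per-level MISC1 value into the EMRS and MRS command entries while keeping those registers' own upper halves. A standalone sketch of just that masking, with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pmg_cmd_emrs = 0xAAAA0000;	/* hypothetical MC_PMG_CMD_EMRS readback */
	uint32_t pmg_cmd_mrs  = 0xBBBB0000;	/* hypothetical MC_PMG_CMD_MRS readback */
	uint32_t misc1        = 0x12345678;	/* hypothetical per-level MC_SEQ_MISC1 value */

	uint32_t emrs_entry = (pmg_cmd_emrs & 0xffff0000) | ((misc1 & 0xffff0000) >> 16);
	uint32_t mrs_entry  = (pmg_cmd_mrs  & 0xffff0000) | (misc1 & 0x0000ffff);

	printf("EMRS entry = 0x%08x, MRS entry = 0x%08x\n", emrs_entry, mrs_entry);
	return 0;
}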
-
-static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
-{
- bool result = true;
-
- switch(in_reg) {
- case mmMC_SEQ_RAS_TIMING:
- *out_reg = mmMC_SEQ_RAS_TIMING_LP;
- break;
- case mmMC_SEQ_DLL_STBY:
- *out_reg = mmMC_SEQ_DLL_STBY_LP;
- break;
- case mmMC_SEQ_G5PDX_CMD0:
- *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
- case mmMC_SEQ_G5PDX_CMD1:
- *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
- case mmMC_SEQ_G5PDX_CTRL:
- *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
- case mmMC_SEQ_CAS_TIMING:
- *out_reg = mmMC_SEQ_CAS_TIMING_LP;
- break;
- case mmMC_SEQ_MISC_TIMING:
- *out_reg = mmMC_SEQ_MISC_TIMING_LP;
- break;
- case mmMC_SEQ_MISC_TIMING2:
- *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
- case mmMC_SEQ_PMG_DVS_CMD:
- *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
- case mmMC_SEQ_PMG_DVS_CTL:
- *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
- case mmMC_SEQ_RD_CTL_D0:
- *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
- case mmMC_SEQ_RD_CTL_D1:
- *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
- case mmMC_SEQ_WR_CTL_D0:
- *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
- case mmMC_SEQ_WR_CTL_D1:
- *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
- case mmMC_PMG_CMD_EMRS:
- *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
- case mmMC_PMG_CMD_MRS:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
- case mmMC_PMG_CMD_MRS1:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
- case mmMC_SEQ_PMG_TIMING:
- *out_reg = mmMC_SEQ_PMG_TIMING_LP;
- break;
- case mmMC_PMG_CMD_MRS2:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
- case mmMC_SEQ_WR_CTL_2:
- *out_reg = mmMC_SEQ_WR_CTL_2_LP;
- break;
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static void ci_set_valid_flag(struct ci_mc_reg_table *table)
-{
- u8 i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->valid_flag |= 1 << i;
- break;
- }
- }
- }
-}
-
-static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
-{
- u32 i;
- u16 address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
- address : table->mc_reg_address[i].s1;
- }
-}
-
-static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
- struct ci_mc_reg_table *ci_table)
-{
- u8 i, j;
-
- if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (table->num_entries > MAX_AC_TIMING_ENTRIES)
- return -EINVAL;
-
- for (i = 0; i < table->last; i++)
- ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
- ci_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ci_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++)
- ci_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- ci_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
- struct ci_mc_reg_table *table)
-{
- u8 i, k;
- u32 tmp;
- bool patch;
-
- tmp = RREG32(mmMC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
-
- if (patch &&
- ((adev->pdev->device == 0x67B0) ||
- (adev->pdev->device == 0x67B1))) {
- for (i = 0; i < table->last; i++) {
- if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- switch (table->mc_reg_address[i].s1) {
- case mmMC_SEQ_MISC1:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
- 0x00000007;
- }
- break;
- case mmMC_SEQ_WR_CTL_D0:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
- 0x0000D0DD;
- }
- break;
- case mmMC_SEQ_WR_CTL_D1:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
- 0x0000D0DD;
- }
- break;
- case mmMC_SEQ_WR_CTL_2:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] = 0;
- }
- break;
- case mmMC_SEQ_CAS_TIMING:
- for (k = 0; k < table->num_entries; k++) {
- if (table->mc_reg_table_entry[k].mclk_max == 125000)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
- 0x000C0140;
- else if (table->mc_reg_table_entry[k].mclk_max == 137500)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
- 0x000C0150;
- }
- break;
- case mmMC_SEQ_MISC_TIMING:
- for (k = 0; k < table->num_entries; k++) {
- if (table->mc_reg_table_entry[k].mclk_max == 125000)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
- 0x00000030;
- else if (table->mc_reg_table_entry[k].mclk_max == 137500)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
- 0x00000035;
- }
- break;
- default:
- break;
- }
- }
-
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
- tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
- WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
- }
-
- return 0;
-}
-
-static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_mc_reg_table *table;
- struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
- u8 module_index = ci_get_memory_module_index(adev);
- int ret;
-
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
- WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
- WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
- WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
- WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
- WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
- WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
- WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
- WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
- WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
- WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
- WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
- WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
- WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
- WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
- WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
- WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
- WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
- WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
- WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
-
- ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
- if (ret)
- goto init_mc_done;
-
- ret = ci_copy_vbios_mc_reg_table(table, ci_table);
- if (ret)
- goto init_mc_done;
-
- ci_set_s0_mc_reg_index(ci_table);
-
- ret = ci_register_patching_mc_seq(adev, ci_table);
- if (ret)
- goto init_mc_done;
-
- ret = ci_set_mc_special_registers(adev, ci_table);
- if (ret)
- goto init_mc_done;
-
- ci_set_valid_flag(ci_table);
-
-init_mc_done:
- kfree(table);
-
- return ret;
-}
-
-static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
- SMU7_Discrete_MCRegisters *mc_reg_table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i, j;
-
- for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
- if (pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (u8)i;
-
- return 0;
-}
-
-static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
- SMU7_Discrete_MCRegisterSet *data,
- u32 num_entries, u32 valid_flag)
-{
- u32 i, j;
-
- for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & (1 << j)) {
- data->value[i] = cpu_to_be32(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
- const u32 memory_clock,
- SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i = 0;
-
- for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
- if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
- break;
- }
-
- if ((i == pi->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, pi->mc_reg_table.last,
- pi->mc_reg_table.valid_flag);
-}
-
-static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
- SMU7_Discrete_MCRegisters *mc_reg_table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i;
-
- for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
- ci_convert_mc_reg_table_entry_to_smc(adev,
- pi->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_reg_table->data[i]);
-}
-
-static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
-
- ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
- if (ret)
- return ret;
- ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
-
- return amdgpu_ci_copy_bytes_to_smc(adev,
- pi->mc_reg_table_start,
- (u8 *)&pi->smc_mc_reg_table,
- sizeof(SMU7_Discrete_MCRegisters),
- pi->sram_end);
-}
-
-static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
- memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
-
- ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
-
- return amdgpu_ci_copy_bytes_to_smc(adev,
- pi->mc_reg_table_start +
- offsetof(SMU7_Discrete_MCRegisters, data[0]),
- (u8 *)&pi->smc_mc_reg_table.data[0],
- sizeof(SMU7_Discrete_MCRegisterSet) *
- pi->dpm_table.mclk_table.count,
- pi->sram_end);
-}
-
-static void ci_enable_voltage_control(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- int i;
- u16 pcie_speed, max_speed = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- pcie_speed = state->performance_levels[i].pcie_gen;
- if (max_speed < pcie_speed)
- max_speed = pcie_speed;
- }
-
- return max_speed;
-}
-
-static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
-{
- u32 speed_cntl = 0;
-
- speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
- PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-
- return (u16)speed_cntl;
-}
-
-static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
-{
- u32 link_width = 0;
-
- link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
- PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
- link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
-
- switch (link_width) {
- case 1:
- return 1;
- case 2:
- return 2;
- case 3:
- return 4;
- case 4:
- return 8;
- case 0:
- case 6:
- default:
- return 16;
- }
-}
-
-static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed =
- ci_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
-
- if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
- current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
- else
- current_link_speed = pi->force_pcie_gen;
-
- pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- pi->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
-#ifdef CONFIG_ACPI
- case AMDGPU_PCIE_GEN3:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
- break;
- pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
- break;
- case AMDGPU_PCIE_GEN2:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
- break;
-#endif
- default:
- pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- pi->pspp_notify_required = true;
- }
-}
-
-static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed =
- ci_get_maximum_link_speed(adev, amdgpu_new_state);
- u8 request;
-
- if (pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
- request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
- request = PCIE_PERF_REQ_PECI_GEN2;
- else
- request = PCIE_PERF_REQ_PECI_GEN1;
-
- if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
- (ci_get_current_pcie_speed(adev) > 0))
- return;
-
-#ifdef CONFIG_ACPI
- amdgpu_acpi_pcie_performance_request(adev, request, false);
-#endif
- }
-}
-
-static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
-
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_sclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_mclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_vddci_table == NULL)
- return -EINVAL;
- if (allowed_mclk_vddci_table->count < 1)
- return -EINVAL;
-
- pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
- pi->max_vddc_in_pp_table =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
-
- pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
- pi->max_vddci_in_pp_table =
- allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
-
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
- allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
- allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
-
- return 0;
-}
-
-static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
- u32 leakage_index;
-
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- if (leakage_table->leakage_id[leakage_index] == *vddc) {
- *vddc = leakage_table->actual_voltage[leakage_index];
- break;
- }
- }
-}
-
-static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
- u32 leakage_index;
-
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- if (leakage_table->leakage_id[leakage_index] == *vddci) {
- *vddci = leakage_table->actual_voltage[leakage_index];
- break;
- }
- }
-}
-
-static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_vce_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_uvd_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_phase_shedding_limits_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
- }
-}
-
-static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_and_voltage_limits *table)
-{
- if (table) {
- ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
- ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
- }
-}
-
-static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_cac_leakage_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
- }
-}
-
-static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
-{
-
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
- ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
- ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
- ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
- ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
- ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
- ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
- ci_patch_cac_leakage_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.cac_leakage_table);
-
-}
-
-static void ci_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *new_ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->current_rps = *rps;
- pi->current_ps = *new_ps;
- pi->current_rps.ps_priv = &pi->current_ps;
- adev->pm.dpm.current_ps = &pi->current_rps;
-}
-
-static void ci_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *new_ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->requested_rps = *rps;
- pi->requested_ps = *new_ps;
- pi->requested_rps.ps_priv = &pi->requested_ps;
- adev->pm.dpm.requested_ps = &pi->requested_rps;
-}
-
-static int ci_dpm_pre_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- ci_update_requested_ps(adev, new_ps);
-
- ci_apply_state_adjust_rules(adev, &pi->requested_rps);
-
- return 0;
-}
-
-static void ci_dpm_post_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
-
- ci_update_current_ps(adev, new_ps);
-}
-
-
-static void ci_dpm_setup_asic(struct amdgpu_device *adev)
-{
- ci_read_clock_registers(adev);
- ci_enable_acpi_power_management(adev);
- ci_init_sclk_t(adev);
-}
-
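-/* Bring up DPM: build the voltage tables, upload and start the SMC firmware,
- * program the DPM/arbitration tables, then start the thermal controller.
- */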
-static int ci_dpm_enable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
- int ret;
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- ci_enable_voltage_control(adev);
- ret = ci_construct_voltage_tables(adev);
- if (ret) {
- DRM_ERROR("ci_construct_voltage_tables failed\n");
- return ret;
- }
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_initialize_mc_reg_table(adev);
- if (ret)
- pi->caps_dynamic_ac_timing = false;
- }
- if (pi->dynamic_ss)
- ci_enable_spread_spectrum(adev, true);
- if (pi->thermal_protection)
- ci_enable_thermal_protection(adev, true);
- ci_program_sstp(adev);
- ci_enable_display_gap(adev);
- ci_program_vc(adev);
- ret = ci_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("ci_upload_firmware failed\n");
- return ret;
- }
- ret = ci_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("ci_process_firmware_header failed\n");
- return ret;
- }
- ret = ci_initial_switch_from_arb_f0_to_f1(adev);
- if (ret) {
- DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
- return ret;
- }
- ret = ci_init_smc_table(adev);
- if (ret) {
- DRM_ERROR("ci_init_smc_table failed\n");
- return ret;
- }
- ret = ci_init_arb_table_index(adev);
- if (ret) {
- DRM_ERROR("ci_init_arb_table_index failed\n");
- return ret;
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_populate_initial_mc_reg_table(adev);
- if (ret) {
- DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = ci_populate_pm_base(adev);
- if (ret) {
- DRM_ERROR("ci_populate_pm_base failed\n");
- return ret;
- }
- ci_dpm_start_smc(adev);
- ci_enable_vr_hot_gpio_interrupt(adev);
- ret = ci_notify_smc_display_change(adev, false);
- if (ret) {
- DRM_ERROR("ci_notify_smc_display_change failed\n");
- return ret;
- }
- ci_enable_sclk_control(adev, true);
- ret = ci_enable_ulv(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_ulv failed\n");
- return ret;
- }
- ret = ci_enable_ds_master_switch(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_ds_master_switch failed\n");
- return ret;
- }
- ret = ci_start_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_start_dpm failed\n");
- return ret;
- }
- ret = ci_enable_didt(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_didt failed\n");
- return ret;
- }
- ret = ci_enable_smc_cac(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_smc_cac failed\n");
- return ret;
- }
- ret = ci_enable_power_containment(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_power_containment failed\n");
- return ret;
- }
-
- ret = ci_power_control_set_level(adev);
- if (ret) {
- DRM_ERROR("ci_power_control_set_level failed\n");
- return ret;
- }
-
- ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
- ret = ci_enable_thermal_based_sclk_dpm(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
- return ret;
- }
-
- ci_thermal_start_thermal_controller(adev);
-
- ci_update_current_ps(adev, boot_ps);
-
- return 0;
-}
-
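-/* Tear DPM down in roughly the reverse order of ci_dpm_enable() and drop
- * back to the boot power state.
- */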
-static void ci_dpm_disable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
- ci_dpm_powergate_uvd(adev, true);
-
- if (!amdgpu_ci_is_smc_running(adev))
- return;
-
- ci_thermal_stop_thermal_controller(adev);
-
- if (pi->thermal_protection)
- ci_enable_thermal_protection(adev, false);
- ci_enable_power_containment(adev, false);
- ci_enable_smc_cac(adev, false);
- ci_enable_didt(adev, false);
- ci_enable_spread_spectrum(adev, false);
- ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
- ci_stop_dpm(adev);
- ci_enable_ds_master_switch(adev, false);
- ci_enable_ulv(adev, false);
- ci_clear_vc(adev);
- ci_reset_to_default(adev);
- ci_dpm_stop_smc(adev);
- ci_force_switch_to_arb_f0(adev);
- ci_enable_thermal_based_sclk_dpm(adev, false);
-
- ci_update_current_ps(adev, boot_ps);
-}
-
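-/* Program the requested power state: freeze SCLK/MCLK DPM, upload the new
- * levels and enable masks, then unfreeze; PCIe link speed changes are
- * requested around the switch when supported.
- */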
-static int ci_dpm_set_power_state(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
- struct amdgpu_ps *old_ps = &pi->current_rps;
- int ret;
-
- ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
- if (pi->pcie_performance_request)
- ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
- ret = ci_freeze_sclk_mclk_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
- return ret;
- }
- ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
- if (ret) {
- DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
- return ret;
- }
- ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
- if (ret) {
- DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
- return ret;
- }
-
- ret = ci_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("ci_update_vce_dpm failed\n");
- return ret;
- }
-
- ret = ci_update_sclk_t(adev);
- if (ret) {
- DRM_ERROR("ci_update_sclk_t failed\n");
- return ret;
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_update_and_upload_mc_reg_table(adev);
- if (ret) {
- DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = ci_program_memory_timing_parameters(adev);
- if (ret) {
- DRM_ERROR("ci_program_memory_timing_parameters failed\n");
- return ret;
- }
- ret = ci_unfreeze_sclk_mclk_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
- return ret;
- }
- ret = ci_upload_dpm_level_enable_mask(adev);
- if (ret) {
- DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
- return ret;
- }
- if (pi->pcie_performance_request)
- ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
-
- return 0;
-}
-
-#if 0
-static void ci_dpm_reset_asic(struct amdgpu_device *adev)
-{
- ci_set_boot_state(adev);
-}
-#endif
-
-static void ci_dpm_display_configuration_changed(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ci_program_display_gap(adev);
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_SI_CLOCK_INFO si;
- struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
-};
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- adev->pm.dpm.boot_ps = rps;
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
-
-static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_pl *pl = &ps->performance_levels[index];
-
- ps->performance_level_count = index + 1;
-
- pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
- pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
- pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
- pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
-
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- pi->sys_pcie_mask,
- pi->vbios_boot_state.pcie_gen_bootup_value,
- clock_info->ci.ucPCIEGen);
- pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
- pi->vbios_boot_state.pcie_lane_bootup_value,
- le16_to_cpu(clock_info->ci.usPCIELane));
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
- pi->acpi_pcie_gen = pl->pcie_gen;
- }
-
- if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
- pi->ulv.supported = true;
- pi->ulv.pl = *pl;
- pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
- }
-
- /* patch up boot state */
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
- pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
- pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
- pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
- }
-
- switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- pi->use_pcie_powersaving_levels = true;
- if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
- pi->pcie_gen_powersaving.max = pl->pcie_gen;
- if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
- pi->pcie_gen_powersaving.min = pl->pcie_gen;
- if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
- pi->pcie_lane_powersaving.max = pl->pcie_lane;
- if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
- pi->pcie_lane_powersaving.min = pl->pcie_lane;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- pi->use_pcie_performance_levels = true;
- if (pi->pcie_gen_performance.max < pl->pcie_gen)
- pi->pcie_gen_performance.max = pl->pcie_gen;
- if (pi->pcie_gen_performance.min > pl->pcie_gen)
- pi->pcie_gen_performance.min = pl->pcie_gen;
- if (pi->pcie_lane_performance.max < pl->pcie_lane)
- pi->pcie_lane_performance.max = pl->pcie_lane;
- if (pi->pcie_lane_performance.min > pl->pcie_lane)
- pi->pcie_lane_performance.min = pl->pcie_lane;
- break;
- default:
- break;
- }
-}
-
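-/* Walk the PPLib state and clock-info arrays from the VBIOS and build the
- * amdgpu_ps/ci_ps power-state list, including the VCE state clocks.
- */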
-static int ci_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct ci_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
- adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
- sizeof(struct amdgpu_ps),
- GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- ci_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk, mclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
- sclk |= clock_info->ci.ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
- mclk |= clock_info->ci.ucMemoryClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = mclk;
- }
-
- return 0;
-}
-
-static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
- struct ci_vbios_boot_state *boot_state)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
- ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
- u8 frev, crev;
- u16 data_offset;
-
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- firmware_info =
- (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
- data_offset);
- boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
- boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
- boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
- boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
- boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
- boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
- boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
-
- return 0;
- }
- return -EINVAL;
-}
-
-static void ci_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- kfree(adev->pm.dpm.ps[i].ps_priv);
- }
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
- amdgpu_free_extended_power_table(adev);
-}
-
-/**
- * ci_dpm_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int ci_dpm_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
-
- DRM_DEBUG("\n");
-
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- if ((adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x665f))
- chip_name = "bonaire_k";
- else
- chip_name = "bonaire";
- break;
- case CHIP_HAWAII:
- if (adev->pdev->revision == 0x80)
- chip_name = "hawaii_k";
- else
- chip_name = "hawaii";
- break;
- case CHIP_KAVERI:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- default: BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
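-/* One-time DPM software init: allocate the ci_power_info, parse the VBIOS
- * power tables, patch in leakage voltages and select the voltage control
- * method.
- */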
-static int ci_dpm_init(struct amdgpu_device *adev)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- SMU7_Discrete_DpmTable *dpm_table;
- struct amdgpu_gpio_rec gpio;
- u16 data_offset, size;
- u8 frev, crev;
- struct ci_power_info *pi;
- int ret;
-
- pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
- if (pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = pi;
-
- pi->sys_pcie_mask =
- adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
-
- pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
-
- pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
- pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
- pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
- pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
-
- pi->pcie_lane_performance.max = 0;
- pi->pcie_lane_performance.min = 16;
- pi->pcie_lane_powersaving.max = 0;
- pi->pcie_lane_powersaving.min = 16;
-
- ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = ci_parse_power_table(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- pi->dll_default_on = false;
- pi->sram_end = SMC_RAM_END;
-
- pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
-
- pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
-
- pi->sclk_dpm_key_disabled = 0;
- pi->mclk_dpm_key_disabled = 0;
- pi->pcie_dpm_key_disabled = 0;
- pi->thermal_sclk_dpm_enabled = 0;
-
- if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- pi->caps_sclk_ds = true;
- else
- pi->caps_sclk_ds = false;
-
- pi->mclk_strobe_mode_threshold = 40000;
- pi->mclk_stutter_mode_threshold = 40000;
- pi->mclk_edc_enable_threshold = 40000;
- pi->mclk_edc_wr_enable_threshold = 40000;
-
- ci_initialize_powertune_defaults(adev);
-
- pi->caps_fps = false;
-
- pi->caps_sclk_throttle_low_notification = false;
-
- pi->caps_uvd_dpm = true;
- pi->caps_vce_dpm = true;
-
- ci_get_leakage_voltages(adev);
- ci_patch_dependency_tables_with_leakage(adev);
- ci_set_private_data_variables_based_on_pptable(adev);
-
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kcalloc(4,
- sizeof(struct amdgpu_clock_voltage_dependency_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- ci_dpm_fini(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
-
- adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
- adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
- adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
-
- adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
- adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
-
- if (adev->asic_type == CHIP_HAWAII) {
- pi->thermal_temp_setting.temperature_low = 94500;
- pi->thermal_temp_setting.temperature_high = 95000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
- } else {
- pi->thermal_temp_setting.temperature_low = 99500;
- pi->thermal_temp_setting.temperature_high = 100000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
- }
-
- pi->uvd_enabled = false;
-
- dpm_table = &pi->smc_state_table;
-
- gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
- if (gpio.valid) {
- dpm_table->VRHotGpio = gpio.shift;
- adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
- } else {
- dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
- }
-
- gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
- if (gpio.valid) {
- dpm_table->AcDcGpio = gpio.shift;
- adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
- } else {
- dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
- }
-
- gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
- if (gpio.valid) {
- u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
-
- switch (gpio.shift) {
- case 0:
- tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
- tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
- break;
- case 1:
- tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
- tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
- break;
- case 2:
- tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
- break;
- case 3:
- tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
- break;
- case 4:
- tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
- break;
- default:
- DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
- break;
- }
- WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
- }
-
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
- else
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
- else
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
- }
-
- pi->vddc_phase_shed_control = true;
-
-#if defined(CONFIG_ACPI)
- pi->pcie_performance_request =
- amdgpu_acpi_is_pcie_performance_request_supported(adev);
-#else
- pi->pcie_performance_request = false;
-#endif
-
- if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->caps_sclk_ss_support = true;
- pi->caps_mclk_ss_support = true;
- pi->dynamic_ss = true;
- } else {
- pi->caps_sclk_ss_support = false;
- pi->caps_mclk_ss_support = false;
- pi->dynamic_ss = true;
- }
-
- if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
- pi->thermal_protection = true;
- else
- pi->thermal_protection = false;
-
- pi->caps_dynamic_ac_timing = true;
-
- pi->uvd_power_gated = true;
-
- /* make sure dc limits are valid */
- if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
- (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- pi->fan_ctrl_is_in_default_mode = true;
-
- return 0;
-}
-
-static void
-ci_dpm_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *rps = &pi->current_rps;
- u32 sclk = ci_get_average_sclk_freq(adev);
- u32 mclk = ci_get_average_mclk_freq(adev);
- u32 activity_percent = 50;
- int ret;
-
- ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
- &activity_percent);
-
- if (ret == 0) {
- activity_percent += 0x80;
- activity_percent >>= 8;
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
- }
-
- seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
- seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
- seq_printf(m, "power level avg sclk: %u mclk: %u\n",
- sclk, mclk);
- seq_printf(m, "GPU load: %u %%\n", activity_percent);
-}
-
-static void ci_dpm_print_power_state(void *handle, void *current_ps)
-{
- struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_pl *pl;
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->performance_level_count; i++) {
- pl = &ps->performance_levels[i];
- printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
- i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
- const struct ci_pl *ci_cpl2)
-{
- return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
- (ci_cpl1->sclk == ci_cpl2->sclk) &&
- (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
- (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
-}
-
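-/* Report whether two power states are equivalent; states with identical
- * performance levels are further compared on their UVD/VCE clocks.
- */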
-static int ci_check_state_equal(void *handle,
- void *current_ps,
- void *request_ps,
- bool *equal)
-{
- struct ci_ps *ci_cps;
- struct ci_ps *ci_rps;
- int i;
- struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
- struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
- ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
-
- if (ci_cps == NULL) {
- *equal = false;
- return 0;
- }
-
- if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
-
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < ci_cps->performance_level_count; i++) {
- if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
- &(ci_rps->performance_levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
-
-static u32 ci_dpm_get_sclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].sclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
-}
-
-static u32 ci_dpm_get_mclk(void *handle, bool low)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].mclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
-}
-
-/* get temperature in millidegrees */
-static int ci_dpm_get_temp(void *handle)
-{
- u32 temp;
- int actual_temp = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
-
- if (temp & 0x200)
- actual_temp = 255;
- else
- actual_temp = temp & 0x1ff;
-
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
-}
-
-static int ci_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = ci_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
- CISLANDS_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = ci_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- return ret;
-}
-
-static int ci_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->powerplay.pp_funcs = &ci_dpm_funcs;
- adev->powerplay.pp_handle = adev;
- ci_dpm_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int ci_dpm_late_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
- ret = ci_set_temperature_range(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ci_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- ret = ci_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = ci_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- ci_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int ci_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- ci_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int ci_dpm_hw_init(void *handle)
-{
- int ret;
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm) {
- ret = ci_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("ci_upload_firmware failed\n");
- return ret;
- }
- ci_dpm_start_smc(adev);
- return 0;
- }
-
- mutex_lock(&adev->pm.mutex);
- ci_dpm_setup_asic(adev);
- ret = ci_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
-}
-
-static int ci_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- ci_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- } else {
- ci_dpm_stop_smc(adev);
- }
-
- return 0;
-}
-
-static int ci_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
- adev->pm.dpm.last_state = adev->pm.dpm.state;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
-
- }
-
- return 0;
-}
-
-static int ci_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- ci_dpm_setup_asic(adev);
- ret = ci_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
- adev->pm.dpm.state = adev->pm.dpm.last_state;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool ci_dpm_is_idle(void *handle)
-{
- /* XXX */
- return true;
-}
-
-static int ci_dpm_wait_for_idle(void *handle)
-{
- /* XXX */
- return 0;
-}
-
-static int ci_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
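-/* Mask or unmask the high/low thermal threshold interrupts in
- * CG_THERMAL_INT.
- */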
-static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int ci_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int ci_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static int ci_dpm_print_clock_levels(void *handle,
- enum pp_clock_type type, char *buf)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
- struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
- struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
-
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
- clock = RREG32(mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
- clock = RREG32(mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = ci_get_current_pcie_speed(adev);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
-
- return size;
-}
-
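-/* Restrict SCLK/MCLK/PCIe DPM to the levels selected in the mask; only
- * honoured while the forced performance level is set to manual.
- */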
-static int ci_dpm_force_clock_level(void *handle,
- enum pp_clock_type type, uint32_t mask)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- if (mask == 0)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!pi->sclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
-
- case PP_MCLK:
- if (!pi->mclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
-
- case PP_PCIE:
- {
- uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
-
- if (!pi->pcie_dpm_key_disabled) {
- if (fls(tmp) != ffs(tmp))
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
- else
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- fls(tmp) - 1);
- }
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int ci_dpm_get_sclk_od(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
- struct ci_single_dpm_table *golden_sclk_table =
- &(pi->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
- struct ci_single_dpm_table *golden_sclk_table =
- &(pi->golden_dpm_table.sclk_table);
-
- if (value > 20)
- value = 20;
-
- ps->performance_levels[ps->performance_level_count - 1].sclk =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
-
-static int ci_dpm_get_mclk_od(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
- struct ci_single_dpm_table *golden_mclk_table =
- &(pi->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
- struct ci_single_dpm_table *golden_mclk_table =
- &(pi->golden_dpm_table.mclk_table);
-
- if (value > 20)
- value = 20;
-
- ps->performance_levels[ps->performance_level_count - 1].mclk =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static int ci_dpm_read_sensor(void *handle, int idx,
- void *value, int *size)
-{
- u32 activity_percent = 50;
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = ci_dpm_get_temp(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = ci_read_smc_soft_register(adev,
- offsetof(SMU7_SoftRegisters,
- AverageGraphicsA),
- &activity_percent);
- if (ret == 0) {
- activity_percent += 0x80;
- activity_percent >>= 8;
- activity_percent =
- activity_percent > 100 ? 100 : activity_percent;
- }
- *((uint32_t *)value) = activity_percent;
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int ci_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
-{
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- ci_dpm_powergate_uvd(handle, gate);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static const struct amd_ip_funcs ci_dpm_ip_funcs = {
- .name = "ci_dpm",
- .early_init = ci_dpm_early_init,
- .late_init = ci_dpm_late_init,
- .sw_init = ci_dpm_sw_init,
- .sw_fini = ci_dpm_sw_fini,
- .hw_init = ci_dpm_hw_init,
- .hw_fini = ci_dpm_hw_fini,
- .suspend = ci_dpm_suspend,
- .resume = ci_dpm_resume,
- .is_idle = ci_dpm_is_idle,
- .wait_for_idle = ci_dpm_wait_for_idle,
- .soft_reset = ci_dpm_soft_reset,
- .set_clockgating_state = ci_dpm_set_clockgating_state,
- .set_powergating_state = ci_dpm_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version ci_smu_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 7,
- .minor = 0,
- .rev = 0,
- .funcs = &ci_dpm_ip_funcs,
-};
-
-static const struct amd_pm_funcs ci_dpm_funcs = {
- .pre_set_power_state = &ci_dpm_pre_set_power_state,
- .set_power_state = &ci_dpm_set_power_state,
- .post_set_power_state = &ci_dpm_post_set_power_state,
- .display_configuration_changed = &ci_dpm_display_configuration_changed,
- .get_sclk = &ci_dpm_get_sclk,
- .get_mclk = &ci_dpm_get_mclk,
- .print_power_state = &ci_dpm_print_power_state,
- .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &ci_dpm_force_performance_level,
- .vblank_too_short = &ci_dpm_vblank_too_short,
- .set_powergating_by_smu = &ci_set_powergating_by_smu,
- .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
- .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
- .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
- .print_clock_levels = ci_dpm_print_clock_levels,
- .force_clock_level = ci_dpm_force_clock_level,
- .get_sclk_od = ci_dpm_get_sclk_od,
- .set_sclk_od = ci_dpm_set_sclk_od,
- .get_mclk_od = ci_dpm_get_mclk_od,
- .set_mclk_od = ci_dpm_set_mclk_od,
- .check_state_equal = ci_check_state_equal,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .read_sensor = ci_dpm_read_sensor,
-};
-
-static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
- .set = ci_dpm_set_interrupt_state,
- .process = ci_dpm_process_interrupt,
-};
-
-static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
deleted file mode 100644
index 91be2996ae7c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __CI_DPM_H__
-#define __CI_DPM_H__
-
-#include "amdgpu_atombios.h"
-#include "ppsmc.h"
-
-#define SMU__NUM_SCLK_DPM_STATE 8
-#define SMU__NUM_MCLK_DPM_LEVELS 6
-#define SMU__NUM_LCLK_DPM_LEVELS 8
-#define SMU__NUM_PCIE_DPM_LEVELS 8
-#include "smu7_discrete.h"
-
-#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
-
-#define CISLANDS_UNUSED_GPIO_PIN 0x7F
-
-struct ci_pl {
- u32 mclk;
- u32 sclk;
- enum amdgpu_pcie_gen pcie_gen;
- u16 pcie_lane;
-};
-
-struct ci_ps {
- u16 performance_level_count;
- bool dc_compatible;
- u32 sclk_t;
- struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct ci_dpm_level {
- bool enabled;
- u32 value;
- u32 param1;
-};
-
-#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define CISLAND_MINIMUM_ENGINE_CLOCK 800
-
-struct ci_single_dpm_table {
- u32 count;
- struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct ci_dpm_table {
- struct ci_single_dpm_table sclk_table;
- struct ci_single_dpm_table mclk_table;
- struct ci_single_dpm_table pcie_speed_table;
- struct ci_single_dpm_table vddc_table;
- struct ci_single_dpm_table vddci_table;
- struct ci_single_dpm_table mvdd_table;
-};
-
-struct ci_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ci_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ci_ulv_parm
-{
- bool supported;
- u32 cg_ulv_parameter;
- u32 volt_change_delay;
- struct ci_pl pl;
-};
-
-#define CISLANDS_MAX_LEAKAGE_COUNT 8
-
-struct ci_leakage_voltage {
- u16 count;
- u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
- u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
-};
-
-struct ci_dpm_level_enable_mask {
- u32 uvd_dpm_enable_mask;
- u32 vce_dpm_enable_mask;
- u32 acp_dpm_enable_mask;
- u32 samu_dpm_enable_mask;
- u32 sclk_dpm_enable_mask;
- u32 mclk_dpm_enable_mask;
- u32 pcie_dpm_enable_mask;
-};
-
-struct ci_vbios_boot_state
-{
- u16 mvdd_bootup_value;
- u16 vddc_bootup_value;
- u16 vddci_bootup_value;
- u32 sclk_bootup_value;
- u32 mclk_bootup_value;
- u16 pcie_gen_bootup_value;
- u16 pcie_lane_bootup_value;
-};
-
-struct ci_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 dll_cntl;
- u32 mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl_1;
- u32 mpll_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct ci_thermal_temperature_setting {
- s32 temperature_low;
- s32 temperature_high;
- s32 temperature_shutdown;
-};
-
-struct ci_pcie_perf_range {
- u16 max;
- u16 min;
-};
-
-enum ci_pt_config_reg_type {
- CISLANDS_CONFIGREG_MMR = 0,
- CISLANDS_CONFIGREG_SMC_IND,
- CISLANDS_CONFIGREG_DIDT_IND,
- CISLANDS_CONFIGREG_CACHE,
- CISLANDS_CONFIGREG_MAX
-};
-
-#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-struct ci_pt_config_reg {
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum ci_pt_config_reg_type type;
-};
-
-struct ci_pt_defaults {
- u8 svi_load_line_en;
- u8 svi_load_line_vddc;
- u8 tdc_vddc_throttle_release_limit_perc;
- u8 tdc_mawt;
- u8 tdc_waterfall_ctl;
- u8 dte_ambient_temp_base;
- u32 display_cac;
- u32 bapm_temp_gradient;
- u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
- u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
-};
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
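-/* Driver-private DPM state for CI parts, hung off adev->pm.dpm.priv. */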
-struct ci_power_info {
- struct ci_dpm_table dpm_table;
- struct ci_dpm_table golden_dpm_table;
- u32 voltage_control;
- u32 mvdd_control;
- u32 vddci_control;
- u32 active_auto_throttle_sources;
- struct ci_clock_registers clock_registers;
- u16 acpi_vddc;
- u16 acpi_vddci;
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
- struct ci_leakage_voltage vddc_leakage;
- struct ci_leakage_voltage vddci_leakage;
- u16 max_vddc_in_pp_table;
- u16 min_vddc_in_pp_table;
- u16 max_vddci_in_pp_table;
- u16 min_vddci_in_pp_table;
- u32 mclk_strobe_mode_threshold;
- u32 mclk_stutter_mode_threshold;
- u32 mclk_edc_enable_threshold;
- u32 mclk_edc_wr_enable_threshold;
- struct ci_vbios_boot_state vbios_boot_state;
- /* smc offsets */
- u32 sram_end;
- u32 dpm_table_start;
- u32 soft_regs_start;
- u32 mc_reg_table_start;
- u32 fan_table_start;
- u32 arb_table_start;
- /* smc tables */
- SMU7_Discrete_DpmTable smc_state_table;
- SMU7_Discrete_MCRegisters smc_mc_reg_table;
- SMU7_Discrete_PmFuses smc_powertune_table;
- /* other stuff */
- struct ci_mc_reg_table mc_reg_table;
- struct atom_voltage_table vddc_voltage_table;
- struct atom_voltage_table vddci_voltage_table;
- struct atom_voltage_table mvdd_voltage_table;
- struct ci_ulv_parm ulv;
- u32 power_containment_features;
- const struct ci_pt_defaults *powertune_defaults;
- u32 dte_tj_offset;
- bool vddc_phase_shed_control;
- struct ci_thermal_temperature_setting thermal_temp_setting;
- struct ci_dpm_level_enable_mask dpm_level_enable_mask;
- u32 need_update_smu7_dpm_table;
- u32 sclk_dpm_key_disabled;
- u32 mclk_dpm_key_disabled;
- u32 pcie_dpm_key_disabled;
- u32 thermal_sclk_dpm_enabled;
- struct ci_pcie_perf_range pcie_gen_performance;
- struct ci_pcie_perf_range pcie_lane_performance;
- struct ci_pcie_perf_range pcie_gen_powersaving;
- struct ci_pcie_perf_range pcie_lane_powersaving;
- u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
- u32 mclk_activity_target;
- u32 low_sclk_interrupt_t;
- u32 last_mclk_dpm_enable_mask;
- u32 sys_pcie_mask;
- /* caps */
- bool caps_power_containment;
- bool caps_cac;
- bool caps_sq_ramping;
- bool caps_db_ramping;
- bool caps_td_ramping;
- bool caps_tcp_ramping;
- bool caps_fps;
- bool caps_sclk_ds;
- bool caps_sclk_ss_support;
- bool caps_mclk_ss_support;
- bool caps_uvd_dpm;
- bool caps_vce_dpm;
- bool caps_samu_dpm;
- bool caps_acp_dpm;
- bool caps_automatic_dc_transition;
- bool caps_sclk_throttle_low_notification;
- bool caps_dynamic_ac_timing;
- bool caps_od_fuzzy_fan_control_support;
- /* flags */
- bool thermal_protection;
- bool pcie_performance_request;
- bool dynamic_ss;
- bool dll_default_on;
- bool cac_enabled;
- bool uvd_enabled;
- bool battery_state;
- bool pspp_notify_required;
- bool enable_bapm_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool use_pcie_performance_levels;
- bool use_pcie_powersaving_levels;
- bool uvd_power_gated;
- /* driver states */
- struct amdgpu_ps current_rps;
- struct ci_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct ci_ps requested_ps;
- /* fan control */
- bool fan_ctrl_is_in_default_mode;
- bool fan_is_controlled_by_smc;
- u32 t_min;
- u32 fan_ctrl_default_mode;
-};
-
-#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
-#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
-
-#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
-
-#define CISLANDS_VRC_DFLT0 0x3FFFC000
-#define CISLANDS_VRC_DFLT1 0x000400
-#define CISLANDS_VRC_DFLT2 0xC00080
-#define CISLANDS_VRC_DFLT3 0xC00200
-#define CISLANDS_VRC_DFLT4 0xC01680
-#define CISLANDS_VRC_DFLT5 0xC00033
-#define CISLANDS_VRC_DFLT6 0xC00033
-#define CISLANDS_VRC_DFLT7 0x3FFFC000
-
-#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
-#define CISLAND_TARGETACTIVITY_DFLT 30
-#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
-
-#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
-#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
-#define PCIE_PERF_REQ_PECI_GEN1 2
-#define PCIE_PERF_REQ_PECI_GEN2 3
-#define PCIE_PERF_REQ_PECI_GEN3 4
-
-#define CISLANDS_SSTU_DFLT 0
-#define CISLANDS_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define CISLANDS_TEMP_RANGE_MIN (90 * 1000)
-#define CISLANDS_TEMP_RANGE_MAX (120 * 1000)
-
-int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-void amdgpu_ci_start_smc(struct amdgpu_device *adev);
-void amdgpu_ci_reset_smc(struct amdgpu_device *adev);
-int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev);
-void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev);
-void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev);
-bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev);
-PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
-PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev);
-int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
-int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 *value, u32 limit);
-int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 value, u32 limit);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
deleted file mode 100644
index b8ba51e045b5..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_smc.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "cikd.h"
-#include "ppsmc.h"
-#include "amdgpu_ucode.h"
-#include "ci_dpm.h"
-
-#include "smu/smu_7_0_1_d.h"
-#include "smu/smu_7_0_1_sh_mask.h"
-
-static int ci_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
-
- return 0;
-}
-
-int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- unsigned long flags;
- u32 data, original_data;
- u32 addr;
- u32 extra_shift;
- int ret = 0;
-
- if (smc_start_address & 3)
- return -EINVAL;
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-void amdgpu_ci_start_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
-}
-
-void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
-}
-
-int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
-{
- static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-}
-
-void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
-
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
-
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
-{
- u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- u32 pc_c = RREG32_SMC(ixSMC_PC_C);
-
- if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
- return true;
-
- return false;
-}
-
-PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_ci_is_smc_running(adev))
- return PPSMC_Result_Failed;
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmSMC_RESP_0);
-
- return (PPSMC_Result)tmp;
-}
-
-PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_ci_is_smc_running(adev))
- return PPSMC_Result_OK;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
- break;
- udelay(1);
- }
-
- return PPSMC_Result_OK;
-}
-
-int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- unsigned long flags;
- u32 ucode_start_address;
- u32 ucode_size;
- const u8 *src;
- u32 data;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- src = (const u8 *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3)
- return -EINVAL;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
- ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- while (ucode_size >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- ucode_size -= 4;
- }
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 *value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = ci_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = ci_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
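
The deleted SMC upload path is a good illustration of the indirect-register access pattern it used: whole dwords are written through SMC_IND_INDEX_0/SMC_IND_DATA_0 in big-endian byte order, and a 1-3 byte tail is merged into the existing SRAM word with a read-modify-write so neighbouring bytes are preserved. A standalone sketch of just that packing step (plain C, hypothetical helper name, no register access):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack up to 3 trailing source bytes into a big-endian dword, keeping the
 * low bytes of 'original' (the word already in SMC SRAM) intact - mirrors
 * the RMW tail handling of the removed amdgpu_ci_copy_bytes_to_smc(). */
static uint32_t pack_smc_tail(const uint8_t *src, unsigned int byte_count,
			      uint32_t original)
{
	unsigned int extra_shift = 8 * (4 - byte_count);
	uint32_t data = 0;

	while (byte_count--)
		data = (data << 8) + *src++;

	data <<= extra_shift;				/* left-align the tail bytes  */
	data |= original & ~(~0UL << extra_shift);	/* keep the untouched low bytes */
	return data;
}

int main(void)
{
	const uint8_t tail[2] = { 0xAA, 0xBB };

	/* 0xAABB lands in the upper half, 0x5678 of the old word survives */
	printf("0x%08" PRIX32 "\n", pack_smc_tail(tail, 2, 0x12345678));
	return 0;
}
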
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f57e9f3..07c1f239e9c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1741,6 +1741,69 @@ static bool cik_need_full_reset(struct amdgpu_device *adev)
return true;
}
+static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool cik_need_reset_on_init(struct amdgpu_device *adev)
+{
+ u32 clock_cntl, pc;
+
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ /* check if the SMC is already running */
+ clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ pc = RREG32_SMC(ixSMC_PC_C);
+ if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
+ (0x20100 <= pc))
+ return true;
+
+ return false;
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1755,6 +1818,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
+ .get_pcie_usage = &cik_get_pcie_usage,
+ .need_reset_on_init = &cik_need_reset_on_init,
};
static int cik_common_early_init(void *handle)
@@ -2004,10 +2070,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
- if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -2025,10 +2088,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
- if (amdgpu_dpm == -1)
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
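
cik_get_pcie_usage() above follows a start / stop-and-shadow protocol and then stitches 64-bit totals together from the 32-bit count registers plus the overflow bits held in the control register; keeping cnt0_of/cnt1_of in 64-bit variables before the shift is what makes the << 32 well defined. A minimal sketch of that stitching step (plain C, hypothetical names):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Combine a 32-bit hardware counter with its overflow field; the overflow
 * value must be widened to 64 bits *before* the shift, otherwise a 32-bit
 * quantity shifted by 32 loses the upper half. */
static uint64_t counter64(uint32_t count_lo, uint32_t overflow)
{
	uint64_t of = overflow;

	return (uint64_t)count_lo | (of << 32);
}

int main(void)
{
	/* counter wrapped 3 times, current low word is 0x1000 */
	printf("%" PRIu64 "\n", counter64(0x1000, 3));
	return 0;
}
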
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f15a0a0..54c625a2e570 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index 2a086610f74d..2fcc4b60153c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,7 +24,6 @@
#ifndef __CIK_DPM_H__
#define __CIK_DPM_H__
-extern const struct amdgpu_ip_block_version ci_smu_ip_block;
extern const struct amdgpu_ip_block_version kv_smu_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..721c757156e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,9 +103,9 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
/* disable irqs */
cik_ih_disable_interrupts(adev);
@@ -131,9 +131,8 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;
/* set the writeback address whether it's enabled or not */
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
@@ -183,11 +182,12 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev)
* Used by cik_irq_process().
* Returns the value of the wptr.
*/
-static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
@@ -196,13 +196,13 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* this should allow us to catch up.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
+ return (wptr & ih->ptr_mask);
}
/* CIK IV Ring
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -265,16 +237,17 @@ static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
* position and also advance the position.
*/
static void cik_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
@@ -284,7 +257,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
@@ -294,9 +267,10 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
*
* Set the IH ring buffer rptr.
*/
-static void cik_ih_set_rptr(struct amdgpu_device *adev)
+static void cik_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
static int cik_ih_early_init(void *handle)
@@ -461,7 +435,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
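
The IH rework visible here (and again in cz_ih.c below) threads a struct amdgpu_ih_ring pointer through get_wptr/decode_iv/set_rptr so the same callbacks can service more than one IH ring. The ring walk itself is unchanged: wptr and rptr are byte offsets, each interrupt vector is four dwords (16 bytes), and the read pointer wraps via ptr_mask. A standalone mock of that walk (host-side C, not the driver's types):

#include <stdint.h>
#include <stdio.h>

#define IV_ENTRY_BYTES	16		/* one interrupt vector = 4 dwords */
#define RING_BYTES	256		/* power of two so the mask works  */

struct mock_ih_ring {
	uint32_t ring[RING_BYTES / 4];
	uint32_t rptr;			/* byte offset, like ih->rptr */
	uint32_t ptr_mask;		/* RING_BYTES - 1             */
};

/* Decode the entry at rptr and advance, following cik_ih_decode_iv(). */
static void decode_one(struct mock_ih_ring *ih)
{
	uint32_t ring_index = ih->rptr >> 2;	/* dword index */
	uint32_t dw0 = ih->ring[ring_index + 0];
	uint32_t dw2 = ih->ring[ring_index + 2];

	printf("src_id=%u vmid=%u pasid=%u\n",
	       dw0 & 0xff, (dw2 >> 8) & 0xff, (dw2 >> 16) & 0xffff);

	ih->rptr = (ih->rptr + IV_ENTRY_BYTES) & ih->ptr_mask;
}

int main(void)
{
	struct mock_ih_ring ih = { .ptr_mask = RING_BYTES - 1 };
	uint32_t wptr = IV_ENTRY_BYTES;		/* one pending entry */

	ih.ring[0] = 146;			/* src_id of a VM fault */
	ih.ring[2] = (42u << 16) | (3u << 8);	/* pasid 42, vmid 3     */

	while (ih.rptr != wptr)
		decode_one(&ih);
	return 0;
}
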
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b918c8886b75..189599b694e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
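
The repeated conversion from open-coded ring->ready handling to amdgpu_ring_test_helper() (here and in the gfx blocks below) moves the "ready" bookkeeping next to the test itself. Roughly, the helper introduced by this series behaves as sketched below; this is a paraphrase of its effect, not a verbatim copy:

/* Sketch: run the HW ring test and record the result on the scheduler,
 * so callers no longer have to set ring->sched.ready by hand. */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	if (r)
		DRM_DEV_ERROR(ring->adev->dev,
			      "ring %s test failed (%d)\n", ring->name, r);

	ring->sched.ready = !r;	/* the scheduler only runs jobs on ready rings */
	return r;
}
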
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..61024b9c7a4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,9 +103,9 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
- int rb_bufsz;
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
+ int rb_bufsz;
/* disable irqs */
cz_ih_disable_interrupts(adev);
@@ -133,9 +133,8 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
/* set the writeback address whether it's enabled or not */
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
@@ -185,11 +184,12 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev)
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
-static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -198,41 +198,13 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
* this should allow us to catch up.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
-}
-
-/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
+ return (wptr & ih->ptr_mask);
}
/**
@@ -244,16 +216,17 @@ static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
* position and also advance the position.
*/
static void cz_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
@@ -263,7 +236,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
@@ -273,9 +246,10 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
*
* Set the IH ring buffer rptr.
*/
-static void cz_ih_set_rptr(struct amdgpu_device *adev)
+static void cz_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
static int cz_ih_early_init(void *handle)
@@ -442,7 +416,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4cfecdce29a3..1f0426d2fc2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1682,7 +1682,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7c868916d90f..2280b971d758 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1724,7 +1724,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 17eaaba36017..bea32f076b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1423,6 +1423,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
uint8_t *payload = buffer + 3;
@@ -1430,7 +1431,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
ssize_t err;
u32 tmp;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2979,7 +2980,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- unsigned long flags;
+ unsigned long flags;
unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8c0576978d36..13da915991dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1616,7 +1616,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
+ amdgpu_bo_unref(&works->old_abo);
+ kfree(works->shared);
+ kfree(works);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d76eb27945dc..305276c7e4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -1845,13 +1840,15 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (ctx_switch) {
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the rings */
gfx_v6_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB2_CNTL, tmp);
WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
- adev->gfx.compute_ring[0].ready = false;
- adev->gfx.compute_ring[1].ready = false;
for (i = 0; i < 2; i++) {
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+ r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
if (r)
return r;
- adev->gfx.compute_ring[i].ready = true;
}
return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
- r);
- gfx_v6_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.cs_ptr);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return r;
}
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
if (!adev->gfx.rlc_fw)
return -EINVAL;
- gfx_v6_0_rlc_stop(adev);
- gfx_v6_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v6_0_init_pg(adev);
gfx_v6_0_init_cg(adev);
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_UCODE_ADDR, 0);
gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
- gfx_v6_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+ .init = gfx_v6_0_rlc_init,
+ .resume = gfx_v6_0_rlc_resume,
+ .stop = gfx_v6_0_rlc_stop,
+ .reset = gfx_v6_0_rlc_reset,
+ .start = gfx_v6_0_rlc_start
+};
+
static int gfx_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
gfx_v6_0_set_ring_funcs(adev);
gfx_v6_0_set_irq_funcs(adev);
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
return r;
}
- r = gfx_v6_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return 0;
}
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
gfx_v6_0_constants_init(adev);
- r = gfx_v6_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v6_0_cp_enable(adev, false);
- gfx_v6_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v6_0_fini_pg(adev);
return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+
+ switch (entry->ring_id) {
+ case 0:
+ ring = &adev->gfx.gfx_ring[0];
+ break;
+ case 1:
+ case 2:
+ ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+ break;
+ default:
+ return;
+ }
+ drm_sched_fault(&ring->sched);
+}
+
static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0e72bc09939a..a59e0fdf5a97 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -2233,13 +2226,15 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (ctx_switch) {
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2262,11 +2257,29 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+ /* Currently, there is a high possibility to get wave ID mismatch
+ * between ME and GDS, leading to a hw deadlock, because ME generates
+ * different wave IDs than the GDS expects. This situation happens
+ * randomly when at least 5 compute pipes use GDS ordered append.
+ * The wave IDs generated by ME are also wrong after suspend/resume.
+ * Those are probably bugs somewhere else in the kernel driver.
+ *
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+ * GDS to 0 for this ring (me/pipe).
+ */
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+ }
+
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -2316,17 +2329,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2349,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2408,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2613,12 +2618,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v7_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
@@ -2675,7 +2677,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2781,7 +2783,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -3106,10 +3108,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
return 0;
@@ -3268,18 +3267,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
* The RLC is a multi-purpose microengine that handles a
* variety of functions.
*/
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -3306,66 +3297,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v7_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->gfx.rlc.cp_table_size) {
-
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- gfx_v7_0_init_cp_pg_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
}
return 0;
@@ -3446,7 +3394,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+ return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3468,7 +3421,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3545,13 +3498,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
adev->gfx.rlc_feature_version = le32_to_cpu(
hdr->ucode_feature_version);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
- gfx_v7_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v7_0_init_pg(adev);
@@ -3582,7 +3535,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_BONAIRE)
WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
- gfx_v7_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3784,72 +3737,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
WREG32(mmRLC_PG_CNTL, data);
}
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_KAVERI)
- max_me = 5;
-
- if (adev->gfx.rlc.cp_table_ptr == NULL)
- return;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
+ return 5;
+ else
+ return 4;
}
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4288,8 +4181,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
- .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v7_0_set_safe_mode,
+ .unset_safe_mode = gfx_v7_0_unset_safe_mode,
+ .init = gfx_v7_0_rlc_init,
+ .get_csb_size = gfx_v7_0_get_csb_size,
+ .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+ .resume = gfx_v7_0_rlc_resume,
+ .stop = gfx_v7_0_rlc_stop,
+ .reset = gfx_v7_0_rlc_reset,
+ .start = gfx_v7_0_rlc_start
};
static int gfx_v7_0_early_init(void *handle)
@@ -4477,7 +4379,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4540,7 +4442,7 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
- r = gfx_v7_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -4604,7 +4506,7 @@ static int gfx_v7_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v7_0_cp_compute_fini(adev);
- gfx_v7_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4529,7 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
/* init rlc */
- r = gfx_v7_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4645,7 +4547,7 @@ static int gfx_v7_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
gfx_v7_0_cp_enable(adev, false);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v7_0_fini_pg(adev);
return 0;
@@ -4730,7 +4632,7 @@ static int gfx_v7_0_soft_reset(void *handle)
gfx_v7_0_update_cg(adev, false);
/* stop the rlc */
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4861,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+ u8 me_id, pipe_id;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
@@ -4974,7 +4900,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
{
DRM_ERROR("Illegal instruction in command stream\n");
// XXX soft reset the gfx block only
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
@@ -5090,7 +5016,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
- .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
+ .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5147,6 +5073,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
adev->gds.gws.total_size = 64;
adev->gds.oa.total_size = 16;
+ adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
if (adev->gds.mem.total_size == 64 * 1024) {
adev->gds.mem.gfx_partition_size = 4096;
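
The GDS wave-ID workaround above is also why .emit_ib_size for the compute ring grows from 4 to 7: the ring must reserve worst-case space for the optional SET_CONFIG_REG write in front of the INDIRECT_BUFFER packet. The dword accounting, made explicit (hypothetical macro names):

/* Worst case emitted by gfx_v7_0_ring_emit_ib_compute() after this change:
 *   SET_CONFIG_REG(GDS_COMPUTE_MAX_WAVE_ID): header + offset + value   = 3
 *   INDIRECT_BUFFER:                 header + addr_lo + addr_hi + ctrl = 4
 */
#define GFX7_IB_RESET_GDS_DW	3	/* optional wave-ID reset packet */
#define GFX7_IB_INDIRECT_DW	4	/* the IB dispatch itself        */
#define GFX7_EMIT_IB_SIZE	(GFX7_IB_RESET_GDS_DW + GFX7_IB_INDIRECT_DW)	/* = 7 */
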
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8908a3..b8e50a34bdb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -54,7 +53,7 @@
#include "ivsrcid/ivsrcid_vislands30.h"
#define GFX8_NUM_GFX_RINGS 1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
@@ -839,18 +838,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +857,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -886,19 +878,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +901,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1282,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_CARRIZO)
- max_me = 5;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+ return 5;
+ else
+ return 4;
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1381,44 +1300,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v8_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- cz_init_cp_jump_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
return 0;
@@ -1443,7 +1336,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1629,7 +1522,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
/* bail if the compute ring is not ready */
- if (!ring->ready)
+ if (!ring->sched.ready)
return 0;
tmp = RREG32(mmGB_EDC_MODE);
@@ -1997,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2088,7 +1981,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- r = gfx_v8_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -2108,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -2181,7 +2074,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
- gfx_v8_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4068,15 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
- gfx_v8_0_rlc_stop(adev);
- gfx_v8_0_rlc_reset(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
- gfx_v8_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -4197,7 +4095,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4322,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
- AMDGPU_DOORBELL_GFX_RING0);
+ adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4335,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- int r;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4379,12 +4276,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ ring->sched.ready = true;
- return r;
+ return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4396,8 +4290,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -4473,12 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
- return r;
+ amdgpu_ring_commit(kiq_ring);
+
+ return 0;
}
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4755,8 +4646,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
- WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
- WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4781,7 +4672,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -4815,19 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
if (r)
goto done;
- /* Test KCQs - reversing the order of rings seems to fix ring test failure
- * after GPU reset
- */
- for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+ return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+ int r, i;
+ struct amdgpu_ring *ring;
+
+ /* collect all the ring tests here: gfx, kiq, compute */
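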
+ ring = &adev->gfx.gfx_ring[0];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ ring = &adev->gfx.kiq.ring;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
-done:
- return r;
+ return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4848,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
+
+ r = gfx_v8_0_cp_test_all_rings(adev);
+ if (r)
+ return r;
+
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
@@ -4867,7 +4776,7 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4899,7 +4808,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -4973,16 +4882,16 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
if (!gfx_v8_0_wait_for_rlc_idle(adev))
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -5061,17 +4970,16 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5165,14 +5073,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
@@ -5197,7 +5104,9 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
- gfx_v8_0_rlc_start(adev);
+ gfx_v8_0_cp_test_all_rings(adev);
+
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -5445,7 +5354,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5499,7 +5408,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -5593,57 +5502,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- u32 data;
- unsigned i;
+ uint32_t rlc_setting;
- data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
+ rlc_setting = RREG32(mmRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return false;
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32(mmRLC_SAFE_MODE, data);
+ return true;
+}
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPM_STAT) &
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
- break;
- udelay(1);
- }
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ unsigned i;
+ data = RREG32(mmRLC_CNTL);
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32(mmRLC_SAFE_MODE, data);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) &
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
{
- u32 data = 0;
+ uint32_t data;
unsigned i;
data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- if (adev->gfx.rlc.in_safe_mode) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- WREG32(mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
- }
- }
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ WREG32(mmRLC_SAFE_MODE, data);
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5558,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
- .enter_safe_mode = iceland_enter_rlc_safe_mode,
- .exit_safe_mode = iceland_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v8_0_set_safe_mode,
+ .unset_safe_mode = gfx_v8_0_unset_safe_mode,
+ .init = gfx_v8_0_rlc_init,
+ .get_csb_size = gfx_v8_0_get_csb_size,
+ .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+ .resume = gfx_v8_0_rlc_resume,
+ .stop = gfx_v8_0_rlc_stop,
+ .reset = gfx_v8_0_rlc_reset,
+ .start = gfx_v8_0_rlc_start
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5576,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5672,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5682,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5765,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
@@ -6131,9 +6045,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,11 +6077,29 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+ /* Currently, there is a high probability of a wave ID mismatch
+ * between ME and GDS, leading to a hw deadlock, because ME generates
+ * different wave IDs than the GDS expects. This situation happens
+ * randomly when at least 5 compute pipes use GDS ordered append.
+ * The wave IDs generated by ME are also wrong after suspend/resume.
+ * Those are probably bugs somewhere else in the kernel driver.
+ *
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+ * GDS to 0 for this ring (me/pipe).
+ */
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+ }
+
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -6738,12 +6672,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6752,7 +6713,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6945,7 +6906,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
- .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
+ .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6975,11 +6936,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
- .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
- .emit_ib = gfx_v8_0_ring_emit_ib_compute,
+ .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
.test_ring = gfx_v8_0_ring_test_ring,
- .test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
@@ -7053,6 +7012,7 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
adev->gds.gws.total_size = 64;
adev->gds.oa.total_size = 16;
+ adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
if (adev->gds.mem.total_size == 64 * 1024) {
adev->gds.mem.gfx_partition_size = 4096;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 21363b2b2ee5..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#define GFX9_NUM_GFX_RINGS 1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -112,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -134,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -219,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
@@ -396,18 +398,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +417,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -443,19 +438,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +461,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
- r = -ETIMEDOUT;
- goto err2;
+ r = -ETIMEDOUT;
+ goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err2;
+ goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
- r = -EINVAL;
- }
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -660,7 +647,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ /*
+ * For Picasso on an AM4 socket board, we use picasso_rlc_am4.bin
+ * instead of picasso_rlc.bin.
+ * Detection method:
+ * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+ * or revision >= 0xD8 && revision <= 0xDF
+ * otherwise it is PCO FP5
+ */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
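As a concrete illustration of the revision windows in the comment above, a small standalone sketch; the helper name and sample revisions are illustrative only and not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* PCO AM4 parts report PCI revision 0xC8-0xCF or 0xD8-0xDF;
 * anything else is treated as PCO FP5 (see the comment above). */
static bool picasso_is_am4(unsigned int rev)
{
	return (rev >= 0xC8 && rev <= 0xCF) || (rev >= 0xD8 && rev <= 0xDF);
}

int main(void)
{
	/* e.g. 0xCA -> AM4 -> picasso_rlc_am4.bin, 0xC1 -> FP5 -> picasso_rlc.bin */
	printf("0xCA: %s\n", picasso_is_am4(0xCA) ? "picasso_rlc_am4.bin" : "picasso_rlc.bin");
	printf("0xC1: %s\n", picasso_is_am4(0xC1) ? "picasso_rlc_am4.bin" : "picasso_rlc.bin");
	return 0;
}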
@@ -1065,85 +1065,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
-{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 5;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- /* clear state block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
-
- /* jump table block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
+ return 5;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1152,45 +1080,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
- r);
- gfx_v9_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v9_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- rv_init_cp_jump_table(adev);
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
switch (adev->asic_type) {
@@ -1264,7 +1165,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1635,8 +1536,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
/* Clear GDS reserved memory */
r = amdgpu_ring_alloc(ring, 17);
if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
- ring->idx, r);
+ DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+ ring->name, r);
return r;
}
@@ -1680,7 +1581,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -1748,7 +1649,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
- r = gfx_v9_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -1769,7 +1670,7 @@ static int gfx_v9_0_sw_init(void *handle)
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
@@ -2499,13 +2400,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- gfx_v9_0_rlc_reset(adev);
-
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -2515,15 +2414,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN ||
- adev->asic_type == CHIP_VEGA20) {
- if (amdgpu_lbpw != 0)
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (amdgpu_lbpw == 0)
+ gfx_v9_0_enable_lbpw(adev, false);
+ else
+ gfx_v9_0_enable_lbpw(adev, true);
+ break;
+ case CHIP_VEGA20:
+ if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
gfx_v9_0_enable_lbpw(adev, false);
+ break;
+ default:
+ break;
}
- gfx_v9_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
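The RAVEN and VEGA20 branches above react differently to the lbpw module parameter. Assuming the usual amdgpu convention of -1 = auto, 0 = disable, 1 = enable (an assumption, not stated in this hunk), the resulting behaviour can be checked with a small standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* Same predicates as the switch above: RAVEN enables LBPW unless the
 * parameter is exactly 0, VEGA20 enables it only when it is > 0. */
static bool raven_lbpw(int amdgpu_lbpw)  { return amdgpu_lbpw != 0; }
static bool vega20_lbpw(int amdgpu_lbpw) { return amdgpu_lbpw > 0; }

int main(void)
{
	int vals[] = { -1, 0, 1 };

	for (int i = 0; i < 3; i++)
		printf("lbpw=%2d  RAVEN:%d  VEGA20:%d\n",
		       vals[i], raven_lbpw(vals[i]), vega20_lbpw(vals[i]));
	/* -1: RAVEN on, VEGA20 off; 0: both off; 1: both on */
	return 0;
}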
@@ -2538,7 +2446,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
@@ -2728,7 +2636,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v9_0_cp_gfx_start(adev);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -2743,8 +2651,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -2867,11 +2775,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
+ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
return r;
}
@@ -3089,9 +2995,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
+ (adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -3250,7 +3156,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -3315,19 +3221,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return r;
ring = &adev->gfx.gfx_ring[0];
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
-
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3354,7 +3254,7 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3392,7 +3292,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -3434,7 +3334,7 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v9_0_csb_vram_unpin(adev);
@@ -3509,7 +3409,7 @@ static int gfx_v9_0_soft_reset(void *handle)
if (grbm_soft_reset) {
/* stop the rlc */
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3608,64 +3508,47 @@ static int gfx_v9_0_late_init(void *handle)
return 0;
}
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
- unsigned i;
-
- if (adev->gfx.rlc.in_safe_mode)
- return;
+ uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- data = RLC_SAFE_MODE__CMD_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+ return false;
- /* wait for RLC_SAFE_MODE */
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
- }
+ return true;
}
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
+ uint32_t data;
+ unsigned i;
- if (!adev->gfx.rlc.in_safe_mode)
- return;
+ data = RLC_SAFE_MODE__CMD_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- /* if RLC is not enabled, do nothing */
- rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- /*
- * Try to exit safe mode only if it is already in safe
- * mode.
- */
- data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RLC_SAFE_MODE__CMD_MASK;
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+}
+
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- gfx_v9_0_enter_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3676,7 +3559,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- gfx_v9_0_exit_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3703,6 +3586,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3767,6 +3652,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -3774,7 +3661,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3814,7 +3701,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3822,7 +3709,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3862,7 +3749,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3891,8 +3778,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
}
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
- .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v9_0_set_safe_mode,
+ .unset_safe_mode = gfx_v9_0_unset_safe_mode,
+ .init = gfx_v9_0_rlc_init,
+ .get_csb_size = gfx_v9_0_get_csb_size,
+ .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+ .resume = gfx_v9_0_rlc_resume,
+ .stop = gfx_v9_0_rlc_stop,
+ .reset = gfx_v9_0_rlc_reset,
+ .start = gfx_v9_0_rlc_start
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4073,9 +3969,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4104,20 +4002,38 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
-{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ /* Currently, there is a high probability of a wave ID mismatch
+ * between ME and GDS, leading to a hw deadlock, because ME generates
+ * different wave IDs than the GDS expects. This situation happens
+ * randomly when at least 5 compute pipes use GDS ordered append.
+ * The wave IDs generated by ME are also wrong after suspend/resume.
+ * Those are probably bugs somewhere else in the kernel driver.
+ *
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+ * GDS to 0 for this ring (me/pipe).
+ */
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+ }
- amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
- amdgpu_ring_write(ring,
+ amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, control);
+ lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
@@ -4696,12 +4612,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
@@ -4710,7 +4653,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
@@ -4801,7 +4744,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
+ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
@@ -4836,11 +4779,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
- .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
- .emit_ib = gfx_v9_0_ring_emit_ib_compute,
+ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
@@ -4920,6 +4861,26 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
break;
}
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
+ break;
+ case CHIP_VEGA12:
+ adev->gds.gds_compute_max_wave_id = 0x27f;
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8)
+ adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
+ else
+ adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
+ break;
+ default:
+ /* this really depends on the chip */
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
+ break;
+ }
+
adev->gds.gws.total_size = 64;
adev->gds.oa.total_size = 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bfa317ad20a9..f5edddf3b29d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two-register stride between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
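The new gfxhub_v1_0_setup_vm_pt_regs() indexes the per-VMID page-table base registers by the stride between the context 0 and context 1 registers rather than by name. A minimal standalone sketch of that indexing, with placeholder register numbers (the real SOC15 offsets are not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Placeholder register indices; only their difference matters here. */
#define VM_CONTEXT0_PT_BASE_LO 0x100
#define VM_CONTEXT1_PT_BASE_LO 0x102   /* assumed: two registers after context 0 */

int main(void)
{
	uint32_t stride = VM_CONTEXT1_PT_BASE_LO - VM_CONTEXT0_PT_BASE_LO;

	for (uint32_t vmid = 0; vmid < 4; vmid++)
		/* context N's LO32 register = context 0's register + N * stride */
		printf("vmid %u -> reg 0x%x\n", vmid,
		       VM_CONTEXT0_PT_BASE_LO + vmid * stride);
	return 0;
}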
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 206e29cad753..92d3a70cd9b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 73ad02aea2b2..9fc3296592fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -359,7 +359,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
return 0;
}
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
@@ -581,7 +582,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v6_0_set_fault_enable_default(adev, true);
- gmc_v6_0_flush_gpu_tlb(adev, 0);
+ gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce19cb3..761dcfb2fec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmCHUB_CONTROL, tmp);
}
- gmc_v7_0_flush_gpu_tlb(adev, 0);
+ gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 747c068379dc..34440672f938 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -633,7 +633,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* Flush the TLB for the requested page table (CIK).
*/
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -942,7 +942,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v8_0_set_fault_enable_default(adev, true);
- gmc_v8_0_flush_gpu_tlb(adev, 0);
+ gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
@@ -1471,8 +1471,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
gmc_v8_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
- struct amdgpu_task_info task_info = { 0 };
+ struct amdgpu_task_info task_info;
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f35d7a554ad5..2fe8397241ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,30 +244,93 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: interrupt vector entry to prescreen
+ * @addr: faulting GPU virtual address decoded from @entry
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+ /* Hash table is full or the fault is already being processed;
+ * ignore further page faults for this address.
+ */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+ bool retry_fault = !!(entry->src_data[1] & 0x80);
uint32_t status = 0;
u64 addr;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
- struct amdgpu_task_info task_info = { 0 };
+ struct amdgpu_task_info task_info;
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
+ "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
+ retry_fault ? "retry" : "no-retry",
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
@@ -293,14 +356,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
{
u32 req = 0;
- /* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -312,48 +375,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
return req;
}
-static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
- ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for IRQ context */
- if (r < 1 && in_interrupt())
- goto failed_kiq;
-
- might_sleep();
-
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq;
-
- return 0;
-
-failed_kiq:
- pr_err("failed to invalidate tlb with kiq\n");
- return r;
-}
-
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +383,50 @@ failed_kiq:
*/
/**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - flush the TLB with a given flush type
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
+ * @flush_type: the type of TLB flush to perform
*
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using the given flush type.
*/
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
- /* Use register 17 for GART */
const unsigned eng = 17;
unsigned i, j;
- int r;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- if (adev->gfx.kiq.ring.ready &&
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
!adev->in_gpu_reset) {
- r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
- hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
- if (!r)
- continue;
- }
+ uint32_t req = hub->vm_inv_eng0_req + eng;
+ uint32_t ack = hub->vm_inv_eng0_ack + eng;
- spin_lock(&adev->gmc.invalidate_lock);
-
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-
- /* Busy wait for ACK.*/
- for (j = 0; j < 100; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
- break;
- cpu_relax();
- }
- if (j < 100) {
- spin_unlock(&adev->gmc.invalidate_lock);
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+ 1 << vmid);
continue;
}
- /* Wait for ACK with a delay.*/
+ spin_lock(&adev->gmc.invalidate_lock);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
+ if (tmp & (1 << vmid))
break;
udelay(1);
}
- if (j < adev->usec_timeout) {
- spin_unlock(&adev->gmc.invalidate_lock);
- continue;
- }
spin_unlock(&adev->gmc.invalidate_lock);
+ if (j < adev->usec_timeout)
+ continue;
+
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
}
@@ -429,7 +436,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
- uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+ uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -715,38 +722,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
+ unsigned i;
+ unsigned vmhub, inv_eng;
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+ }
+
+ return 0;
+}
+
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 0, 1: idle
- * Engine 2, 3: firmware
- * Engine 4~13: amdgpu ring, subject to change when ring number changes
- * Engine 14~15: idle
- * Engine 16: kfd tlb invalidation
- * Engine 17: Gart flushes
- */
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
- unsigned i;
int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
-
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
- }
-
- /* Engine 16 is used for KFD and 17 for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 16);
+ r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
@@ -950,7 +965,11 @@ static int gmc_v9_0_sw_init(void *handle)
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
- amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ /* under SR-IOV, restrict max_pfn to below AMDGPU_GMC_HOLE */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
+ else
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
default:
break;
@@ -959,6 +978,9 @@ static int gmc_v9_0_sw_init(void *handle)
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -991,7 +1013,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
@@ -1122,7 +1144,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
- gmc_v9_0_flush_gpu_tlb(adev, 0);
+ gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
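gmc_v9_0_prescreen_iv() added above drops duplicate retry faults: each fault is keyed by (PASID, page address), recorded in the VM's fault hash and pushed into a per-VM FIFO, so only the first occurrence reaches the handler until the key is cleared again. A standalone model of that dedup logic, with a flat array standing in for the real hash table and kfifo and all other names hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE 8

static uint64_t seen[FIFO_SIZE];        /* crude "hash table" of in-flight keys */
static uint64_t fifo[FIFO_SIZE];
static unsigned fifo_count;

static uint64_t fault_key(uint16_t pasid, uint64_t addr)
{
        /* pack PASID and page address into one 64-bit key */
        return ((uint64_t)pasid << 48) | (addr >> 12);
}

static bool prescreen_fault(uint16_t pasid, uint64_t addr)
{
        uint64_t key = fault_key(pasid, addr);
        unsigned i;

        for (i = 0; i < FIFO_SIZE; i++)
                if (seen[i] == key)
                        return false;   /* already being handled, drop it */

        if (fifo_count == FIFO_SIZE)
                return false;           /* FIFO is full, drop until there is space */

        for (i = 0; i < FIFO_SIZE; i++)
                if (!seen[i]) {
                        seen[i] = key;
                        break;
                }
        fifo[fifo_count++] = key;
        return true;                    /* first fault for this page, process it */
}

int main(void)
{
        printf("%d\n", prescreen_fault(3, 0x100000));   /* 1: queued */
        printf("%d\n", prescreen_fault(3, 0x100000));   /* 0: duplicate dropped */
        printf("%d\n", prescreen_fault(3, 0x200000));   /* 1: different page */
        return 0;
}

The real code additionally takes vm_manager.pasid_lock to look up the VM by PASID before touching its hash and FIFO, which the model omits.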
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index b030ca5ea107..5c8deac65580 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,6 +24,16 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
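The two bitmap defines encode which invalidation engines are free on each hub: 0x1FFF3 leaves engines 0, 1 and 4..16 available, keeps 2 and 3 for firmware and 17 for GART flushes. gmc_v9_0_allocate_vm_inv_eng() hands them out with ffs(), one per ring. A standalone sketch of that allocator, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

static int alloc_engine(uint32_t *bitmap)
{
        int bit = ffs(*bitmap);         /* 1-based index of the lowest set bit, 0 if none */

        if (!bit)
                return -1;              /* no free engine left for this hub */

        *bitmap &= ~(1u << (bit - 1));  /* mark it used */
        return bit - 1;
}

int main(void)
{
        uint32_t free_engs = 0x1FFF3;
        int i;

        for (i = 0; i < 4; i++)
                printf("ring %d -> engine %d\n", i, alloc_engine(&free_engs));
        /* prints engines 0, 1, 4, 5: bits 2 and 3 are never handed out */
        return 0;
}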
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..b1626e1d2f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,9 +103,9 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int iceland_ih_irq_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
/* disable irqs */
iceland_ih_disable_interrupts(adev);
@@ -133,9 +133,8 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
/* set the writeback address whether it's enabled or not */
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
@@ -185,11 +184,12 @@ static void iceland_ih_irq_disable(struct amdgpu_device *adev)
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
-static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
+static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -198,41 +198,13 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
* this should allow us to catchup.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
-}
-
-/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
+ return (wptr & ih->ptr_mask);
}
/**
@@ -244,16 +216,17 @@ static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
* position and also advance the position.
*/
static void iceland_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
@@ -263,7 +236,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
@@ -273,9 +246,10 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
*
* Set the IH ring buffer rptr.
*/
-static void iceland_ih_set_rptr(struct amdgpu_device *adev)
+static void iceland_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
static int iceland_ih_early_init(void *handle)
@@ -440,7 +414,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
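The IH callbacks now receive the ring they operate on (struct amdgpu_ih_ring *) instead of implicitly using adev->irq.ih, which lets the same decode path serve more than one interrupt ring; the per-ASIC prescreen_iv hook is dropped entirely. A standalone model of callbacks parameterized by the ring, with simplified types that only mimic the byte-based rptr/wptr bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct ih_ring {
        uint32_t ring[64];
        uint32_t rptr, wptr, ptr_mask;
};

struct ih_funcs {
        uint32_t (*get_wptr)(struct ih_ring *ih);
        void (*decode_iv)(struct ih_ring *ih, uint32_t *src_id);
};

static uint32_t model_get_wptr(struct ih_ring *ih)
{
        return ih->wptr & ih->ptr_mask;
}

static void model_decode_iv(struct ih_ring *ih, uint32_t *src_id)
{
        *src_id = ih->ring[ih->rptr >> 2] & 0xff;
        ih->rptr += 16;                 /* rptr/wptr are in bytes, 16 bytes per entry */
}

int main(void)
{
        struct ih_ring ih1 = { .ptr_mask = 0xff }, ih2 = { .ptr_mask = 0xff };
        struct ih_funcs funcs = { model_get_wptr, model_decode_iv };
        uint32_t src;

        ih1.ring[0] = 146;
        ih1.wptr = 16;
        while (ih1.rptr != funcs.get_wptr(&ih1)) {
                funcs.decode_iv(&ih1, &src);
                printf("ring1 src_id %u\n", src);
        }
        (void)ih2;                      /* a second ring works with the same funcs */
        return 0;
}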
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f43443..0c9a2c03504e 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
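Callers now go through amdgpu_gfx_rlc_enter_safe_mode()/amdgpu_gfx_rlc_exit_safe_mode() instead of chasing adev->gfx.rlc.funcs directly. The wrappers themselves are not shown in this hunk, so the sketch below only illustrates the general wrapper pattern (centralizing the indirection and a missing-callback guard) with hypothetical names; it is not the driver's implementation:

#include <stdio.h>

struct rlc_funcs {
        void (*enter_safe_mode)(void *ctx);
        void (*exit_safe_mode)(void *ctx);
};

struct device {
        struct rlc_funcs *rlc_funcs;
};

static void rlc_enter_safe_mode(struct device *dev)
{
        if (dev->rlc_funcs && dev->rlc_funcs->enter_safe_mode)
                dev->rlc_funcs->enter_safe_mode(dev);
}

static void rlc_exit_safe_mode(struct device *dev)
{
        if (dev->rlc_funcs && dev->rlc_funcs->exit_safe_mode)
                dev->rlc_funcs->exit_safe_mode(dev);
}

static void enter(void *ctx) { (void)ctx; printf("RLC safe mode on\n"); }
static void leave(void *ctx) { (void)ctx; printf("RLC safe mode off\n"); }

int main(void)
{
        struct rlc_funcs funcs = { enter, leave };
        struct device dev = { &funcs };

        rlc_enter_safe_mode(&dev);
        /* ... program DIDT registers here, as kv_enable_didt() does ... */
        rlc_exit_safe_mode(&dev);
        return 0;
}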
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a0db67adc34c..1696644ec022 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two registers distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -177,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bef3d0c0c117..0de0fdf98c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
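mmhub_v1_0_setup_vm_pt_regs() can program the page-table base for any VMID because the VM context registers sit at a fixed stride: the distance between the mmVM_CONTEXT1_* and mmVM_CONTEXT0_* registers, multiplied by the VMID, is added to the context-0 register offset. A standalone model of that addressing, with made-up register indices:

#include <stdint.h>
#include <stdio.h>

#define CTX0_PT_BASE_LO 0x1000  /* hypothetical register indices, not the real map */
#define CTX0_PT_BASE_HI 0x1001
#define CTX1_PT_BASE_LO 0x1002

int main(void)
{
        uint32_t stride = CTX1_PT_BASE_LO - CTX0_PT_BASE_LO;    /* 2 regs per context */
        uint64_t page_table_base = 0x123456789000ULL;
        uint32_t vmid;

        for (vmid = 0; vmid < 3; vmid++) {
                printf("vmid %u: LO reg 0x%x <- 0x%08x, HI reg 0x%x <- 0x%08x\n",
                       vmid,
                       CTX0_PT_BASE_LO + stride * vmid,
                       (uint32_t)(page_table_base & 0xffffffff),
                       CTX0_PT_BASE_HI + stride * vmid,
                       (uint32_t)(page_table_base >> 32));
        }
        return 0;
}

With vmid = 0 this degenerates to the old init_gart_pt_regs() behaviour, which is how mmhub_v1_0_init_gart_aperture_regs() now calls it.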
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..73851ebb3833 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return r;
}
/* Retrieve checksum from mailbox2 */
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
@@ -266,7 +266,8 @@ flr_done:
}
/* Trigger recovery for world switch failure if no TDR */
- if (amdgpu_device_should_recover_gpu(adev))
+ if (amdgpu_device_should_recover_gpu(adev)
+ && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
amdgpu_device_gpu_recover(adev, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d528dd..6a0fcd67662a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6f9c54978cc1..cc967dbfd631 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -27,12 +27,9 @@
#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
+#include "nbio/nbio_6_1_smn.h"
#include "vega10_enum.h"
-#define smnCPM_CONTROL 0x11180460
-#define smnPCIE_CNTL2 0x11180070
-#define smnPCIE_CONFIG_CNTL 0x11180044
-
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -71,7 +68,7 @@ static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
}
static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
+ bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -80,7 +77,7 @@ static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instan
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
@@ -270,6 +267,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
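sdma_doorbell_range() now takes the doorbell aperture size from the caller instead of hard-coding SIZE = 2, so the caller decides how large the per-instance doorbell window is. A standalone sketch of the read-modify-write with a parameterized SIZE field; the field positions below are invented for illustration and are not the real BIF_SDMA0_DOORBELL_RANGE layout:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_SHIFT    2
#define OFFSET_MASK     (0xFFFu << OFFSET_SHIFT)
#define SIZE_SHIFT      16
#define SIZE_MASK       (0xFFu << SIZE_SHIFT)

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t doorbell_range = 0;
        uint32_t doorbell_index = 0xF0, doorbell_size = 8;      /* hypothetical values */

        doorbell_range = set_field(doorbell_range, OFFSET_MASK, OFFSET_SHIFT,
                                   doorbell_index);
        doorbell_range = set_field(doorbell_range, SIZE_MASK, SIZE_SHIFT,
                                   doorbell_size);
        printf("BIF_SDMA0_DOORBELL_RANGE <- 0x%08x\n", doorbell_range);
        return 0;
}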
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index df34dc79d444..1cdb98ad2db3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -27,13 +27,11 @@
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
+#include "nbio/nbio_7_0_smn.h"
#include "vega10_enum.h"
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
-#define smnCPM_CONTROL 0x11180460
-#define smnPCIE_CNTL2 0x11180070
-
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -69,7 +67,7 @@ static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
}
static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
+ bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -78,7 +76,7 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f8cee95d61cc..c69d51598cfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -26,15 +26,13 @@
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
+#include "nbio/nbio_7_4_0_smn.h"
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
-#define smnCPM_CONTROL 0x11180460
-#define smnPCIE_CNTL2 0x11180070
-
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -67,7 +65,7 @@ static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
}
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
+ bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -76,7 +74,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+ doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
@@ -92,7 +90,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
+ u32 tmp = 0;
+
+ if (enable) {
+ tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+ lower_32_bits(adev->doorbell.base));
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+ upper_32_bits(adev->doorbell.base));
+ }
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
@@ -222,7 +233,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..f3a7d207af07 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
@@ -188,7 +191,7 @@ enum psp_gfx_fw_type
GFX_FW_TYPE_MMSCH = 19,
GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20,
GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21,
- GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL = 22,
GFX_FW_TYPE_UVD1 = 23,
GFX_FW_TYPE_MAX = 24
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c2205485a..77c2bc344dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -38,75 +38,6 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
-static int
-psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
- switch(ucode->ucode_id) {
- case AMDGPU_UCODE_ID_SDMA0:
- *type = GFX_FW_TYPE_SDMA0;
- break;
- case AMDGPU_UCODE_ID_SDMA1:
- *type = GFX_FW_TYPE_SDMA1;
- break;
- case AMDGPU_UCODE_ID_CP_CE:
- *type = GFX_FW_TYPE_CP_CE;
- break;
- case AMDGPU_UCODE_ID_CP_PFP:
- *type = GFX_FW_TYPE_CP_PFP;
- break;
- case AMDGPU_UCODE_ID_CP_ME:
- *type = GFX_FW_TYPE_CP_ME;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME1;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME2;
- break;
- case AMDGPU_UCODE_ID_RLC_G:
- *type = GFX_FW_TYPE_RLC_G;
- break;
- case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
- *type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
- break;
- case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
- *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
- break;
- case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
- *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
- break;
- case AMDGPU_UCODE_ID_SMC:
- *type = GFX_FW_TYPE_SMU;
- break;
- case AMDGPU_UCODE_ID_UVD:
- *type = GFX_FW_TYPE_UVD;
- break;
- case AMDGPU_UCODE_ID_VCE:
- *type = GFX_FW_TYPE_VCE;
- break;
- case AMDGPU_UCODE_ID_VCN:
- *type = GFX_FW_TYPE_VCN;
- break;
- case AMDGPU_UCODE_ID_DMCU_ERAM:
- *type = GFX_FW_TYPE_DMCU_ERAM;
- break;
- case AMDGPU_UCODE_ID_DMCU_INTV:
- *type = GFX_FW_TYPE_DMCU_ISR;
- break;
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -158,26 +89,6 @@ out:
return err;
}
-static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd)
-{
- int ret;
- uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
- memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
- cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
- ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
- if (ret)
- DRM_ERROR("Unknown firmware type\n");
-
- return ret;
-}
-
static int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -240,12 +151,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -457,7 +365,6 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp)
static const struct psp_funcs psp_v10_0_funcs = {
.init_microcode = psp_v10_0_init_microcode,
- .prep_cmd_buf = psp_v10_0_prep_cmd_buf,
.ring_init = psp_v10_0_ring_init,
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 3f3fac2d50cd..860b70d80d3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,71 +34,21 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
-static int
-psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
- switch (ucode->ucode_id) {
- case AMDGPU_UCODE_ID_SDMA0:
- *type = GFX_FW_TYPE_SDMA0;
- break;
- case AMDGPU_UCODE_ID_SDMA1:
- *type = GFX_FW_TYPE_SDMA1;
- break;
- case AMDGPU_UCODE_ID_CP_CE:
- *type = GFX_FW_TYPE_CP_CE;
- break;
- case AMDGPU_UCODE_ID_CP_PFP:
- *type = GFX_FW_TYPE_CP_PFP;
- break;
- case AMDGPU_UCODE_ID_CP_ME:
- *type = GFX_FW_TYPE_CP_ME;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME1;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME2;
- break;
- case AMDGPU_UCODE_ID_RLC_G:
- *type = GFX_FW_TYPE_RLC_G;
- break;
- case AMDGPU_UCODE_ID_SMC:
- *type = GFX_FW_TYPE_SMU;
- break;
- case AMDGPU_UCODE_ID_UVD:
- *type = GFX_FW_TYPE_UVD;
- break;
- case AMDGPU_UCODE_ID_VCE:
- *type = GFX_FW_TYPE_VCE;
- break;
- case AMDGPU_UCODE_ID_UVD1:
- *type = GFX_FW_TYPE_UVD1;
- break;
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -119,26 +69,66 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
goto out;
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
- return 0;
-out:
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+ adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+ adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
}
+ return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+out:
+ dev_err(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
+
return err;
}
@@ -153,8 +143,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -167,7 +160,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +201,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -224,26 +217,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v11_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd)
-{
- int ret;
- uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
- memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
- cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
- ret = psp_v11_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
- if (ret)
- DRM_ERROR("Unknown firmware type\n");
-
- return ret;
-}
-
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -270,6 +243,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -278,26 +258,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be handshake issue with hardware which needs delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
@@ -308,15 +309,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -355,7 +365,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_v11_0_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -384,7 +397,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -529,7 +546,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
/*send the mode 1 reset command*/
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -552,31 +569,120 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
return 0;
}
static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
return 0;
}
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
- u64 hive_id = 0;
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
- /* Remove me when we can get correct hive_id through PSP */
- if (psp->adev->gmc.xgmi.num_physical_nodes)
- hive_id = 0x123456789abcdef;
+ xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
- return hive_id;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
}
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
- .prep_cmd_buf = psp_v11_0_prep_cmd_buf,
.ring_init = psp_v11_0_ring_init,
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
@@ -587,6 +693,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+ .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+ .support_vmr_ring = psp_v11_0_support_vmr_ring,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
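psp_v11_0_support_vmr_ring() gates the new submission path: an SR-IOV VF running SOS firmware newer than 0x80045 drives the ring through C2PMSG_101/102/103, while bare metal keeps using C2PMSG_64/67/69-71. A standalone model of selecting the register set from that predicate, with the register numbers taken from the hunks above and everything else simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct psp_regs {
        uint32_t ring_ctrl;     /* write commands / poll the response flag here */
        uint32_t ring_wptr;     /* write pointer register */
};

static const struct psp_regs bare_metal_regs = { .ring_ctrl = 64,  .ring_wptr = 67 };
static const struct psp_regs vmr_regs        = { .ring_ctrl = 101, .ring_wptr = 102 };

static bool support_vmr_ring(bool is_sriov_vf, uint32_t sos_fw_version)
{
        return is_sriov_vf && sos_fw_version > 0x80045;
}

int main(void)
{
        const struct psp_regs *regs =
                support_vmr_ring(true, 0x80046) ? &vmr_regs : &bare_metal_regs;

        printf("submit via C2PMSG_%u, wptr in C2PMSG_%u\n",
               regs->ring_ctrl, regs->ring_wptr);
        return 0;
}

This is also why bootloader_load_sysdrv now caches the SOS version from C2PMSG_58 when the secure OS is already running: the predicate needs it before any firmware load happens.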
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e1ebf770c303..0487e3a4e9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -47,57 +47,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-static int
-psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
-{
- switch(ucode->ucode_id) {
- case AMDGPU_UCODE_ID_SDMA0:
- *type = GFX_FW_TYPE_SDMA0;
- break;
- case AMDGPU_UCODE_ID_SDMA1:
- *type = GFX_FW_TYPE_SDMA1;
- break;
- case AMDGPU_UCODE_ID_CP_CE:
- *type = GFX_FW_TYPE_CP_CE;
- break;
- case AMDGPU_UCODE_ID_CP_PFP:
- *type = GFX_FW_TYPE_CP_PFP;
- break;
- case AMDGPU_UCODE_ID_CP_ME:
- *type = GFX_FW_TYPE_CP_ME;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC1_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME1;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2:
- *type = GFX_FW_TYPE_CP_MEC;
- break;
- case AMDGPU_UCODE_ID_CP_MEC2_JT:
- *type = GFX_FW_TYPE_CP_MEC_ME2;
- break;
- case AMDGPU_UCODE_ID_RLC_G:
- *type = GFX_FW_TYPE_RLC_G;
- break;
- case AMDGPU_UCODE_ID_SMC:
- *type = GFX_FW_TYPE_SMU;
- break;
- case AMDGPU_UCODE_ID_UVD:
- *type = GFX_FW_TYPE_UVD;
- break;
- case AMDGPU_UCODE_ID_VCE:
- *type = GFX_FW_TYPE_VCE;
- break;
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -194,7 +143,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -240,8 +189,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -254,7 +206,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -274,26 +226,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd)
-{
- int ret;
- uint64_t fw_mem_mc_addr = ucode->mc_addr;
-
- memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
- cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
-
- ret = psp_v3_1_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
- if (ret)
- DRM_ERROR("Unknown firmware type\n");
-
- return ret;
-}
-
static int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -356,12 +288,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -571,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
uint32_t reg;
- reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
- WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
- reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
+ reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
@@ -593,9 +520,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
}
/*send the mode 1 reset command*/
- WREG32(offset, 0x70000);
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -615,7 +542,6 @@ static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v3_1_bootloader_load_sos,
- .prep_cmd_buf = psp_v3_1_prep_cmd_buf,
.ring_init = psp_v3_1_ring_init,
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
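The smu_reload_quirk change replaces an open-coded index/data access (write PCIE_INDEX2, then read PCIE_DATA2) with the RREG32_PCIE() helper. A standalone model of that indirect-register pattern, using a fake MMIO window rather than real hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[2];                /* [0] = index register, [1] = data register */
static uint32_t backing[1 << 8];        /* pretend PCIE register space */

static void wreg(unsigned reg, uint32_t val)
{
        mmio[reg] = val;
        if (reg == 1)
                backing[mmio[0] & 0xff] = val;
}

static uint32_t rreg(unsigned reg)
{
        return reg == 1 ? backing[mmio[0] & 0xff] : mmio[reg];
}

/* helper equivalent to open-coding "write index, read data" at every call site */
static uint32_t rreg_pcie(uint32_t addr)
{
        wreg(0, addr);                  /* PCIE_INDEX */
        return rreg(1);                 /* PCIE_DATA  */
}

int main(void)
{
        wreg(0, 0x24);                  /* select a hypothetical FIRMWARE_FLAGS reg */
        wreg(1, 0x00000800);            /* seed a value behind the window */
        printf("flags = 0x%08x\n", rreg_pcie(0x24));
        return 0;
}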
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2d4770e173dd..cca3552b36ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
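+	/* ring_id encodes the SDMA instance in bits 1:0 and the queue in bits 3:2 */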
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6fb3edaba0ec..0ce8331baeb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 /* IB packet must end on an 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
/* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1162,8 +1145,7 @@ static int sdma_v3_0_sw_init(void *handle)
ring->ring_obj = NULL;
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
- ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
} else {
ring->use_pollmem = true;
}
@@ -1440,8 +1422,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7a8c9172d30a..c816e55d43a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
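+/* register access helpers; both assume a local 'adev' pointer is in scope at the call site */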
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -73,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -91,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
@@ -98,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
@@ -122,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -152,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -367,16 +373,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
- u32 lowbit, highbit;
-
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
- DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- ring->me, highbit, lowbit);
- wptr = highbit;
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
wptr = wptr << 32;
- wptr |= lowbit;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
}
return wptr >> 2;
@@ -417,14 +418,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
}
}
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -444,9 +498,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 /* IB packet must end on an 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -568,16 +625,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -593,6 +650,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+ struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+ u32 rb_cntl, ib_cntl;
+ int i;
+
+ if ((adev->mman.buffer_funcs_ring == sdma0) ||
+ (adev->mman.buffer_funcs_ring == sdma1))
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+ }
+
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
+}
+
+/**
 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
@@ -630,18 +720,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
- phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
}
}
@@ -662,156 +749,213 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
if (enable == false) {
sdma_v4_0_gfx_stop(adev);
sdma_v4_0_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
}
}
/**
+ * sdma_v4_0_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
+
+/**
* sdma_v4_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
+ * @i: instance to resume
*
* Set up the gfx DMA ring buffers and enable them (VEGA10).
* Returns 0 for success, error for failure.
*/
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
- u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
- u32 temp;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- wb_offset = (ring->rptr_offs * 4);
+ wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
- /* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- /* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ ring->wptr = 0;
- ring->wptr = 0;
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
-
- doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+
+ sdma_v4_0_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
- if (amdgpu_sriov_vf(adev))
- sdma_v4_0_ring_set_wptr(ring);
+ ring->sched.ready = true;
+}
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+/**
+ * sdma_v4_0_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them (VEGA10).
+ * Returns 0 for success, error for failure.
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+ wb_offset = (ring->rptr_offs * 4);
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
- else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
-#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
-#endif
- /* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- ring->ready = true;
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v4_0_ctx_switch_enable(adev, true);
- sdma_v4_0_enable(adev, true);
- }
+ ring->wptr = 0;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
- }
+	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
- }
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+ /* paging queue doorbell range is setup at sdma_v4_0_gfx_resume */
+ sdma_v4_0_page_ring_set_wptr(ring);
+
+	/* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
- return 0;
+ ring->sched.ready = true;
}
static void
@@ -922,12 +1066,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+ le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
}
return 0;
@@ -943,33 +1089,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
- int r = 0;
+ struct amdgpu_ring *ring;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
+ } else {
- /* set RB registers */
- r = sdma_v4_0_gfx_resume(adev);
- return r;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ r = sdma_v4_0_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_0_ctx_switch_enable(adev, true);
+ }
+
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_0_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+ }
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- r = sdma_v4_0_load_microcode(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_0_ctx_switch_enable(adev, true);
+ sdma_v4_0_enable(adev, true);
+ } else {
+ r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
}
- /* unhalt the MEs */
- sdma_v4_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v4_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
- /* start the gfx rings and rlc compute queues */
- r = sdma_v4_0_gfx_resume(adev);
- if (r)
- return r;
- r = sdma_v4_0_rlc_resume(adev);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
return r;
}
@@ -993,21 +1184,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1210,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1055,20 +1237,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1265,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
+
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1206,7 +1380,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1272,15 +1446,45 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
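+/* minimum SDMA firmware versions that expose the paging queue, per ASIC */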
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ uint fw_version = adev->sdma.instance[0].fw_version;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return fw_version >= 430;
+ case CHIP_VEGA12:
+ /*return fw_version >= 31;*/
+ return false;
+ case CHIP_VEGA20:
+ return fw_version >= 123;
+ default:
+ return false;
+ }
+}
+
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
else
adev->sdma.num_instances = 2;
+ r = sdma_v4_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+ adev->sdma.has_page_queue = false;
+ else if (sdma_v4_0_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1289,7 +1493,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
-
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1308,12 +1511,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v4_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1322,15 +1519,8 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- if (adev->asic_type == CHIP_VEGA10)
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
- else
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1340,6 +1530,27 @@ static int sdma_v4_0_sw_init(void *handle)
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+			/* paging queue uses the same doorbell index/routing as the gfx queue
+ * with 0x400 (4096 dwords) offset on second doorbell page
+ */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+ ring->doorbell_index += 0x400;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ (i == 0) ?
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
+ if (r)
+ return r;
+ }
}
return r;
@@ -1350,8 +1561,11 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1628,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1430,8 +1644,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+ sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1452,16 +1666,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
- sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@@ -1470,39 +1681,34 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ uint32_t instance;
+
DRM_DEBUG("IH: SDMA trap\n");
switch (entry->client_id) {
case SOC15_IH_CLIENTID_SDMA0:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[0].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 0;
break;
case SOC15_IH_CLIENTID_SDMA1:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[1].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
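+	/* the page queue fence arrives on ring_id 1 on Vega20 and on ring_id 3 on earlier ASICs */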
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ case 1:
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
+ break;
+ case 2:
+ /* XXX compute */
+ break;
+ case 3:
+ if (adev->asic_type != CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
@@ -1512,12 +1718,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ int instance;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
return 0;
}
-
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -1730,6 +1953,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB,
+ .get_rptr = sdma_v4_0_ring_get_rptr,
+ .get_wptr = sdma_v4_0_page_ring_get_wptr,
+ .set_wptr = sdma_v4_0_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+ .emit_ib = sdma_v4_0_ring_emit_ib,
+ .emit_fence = sdma_v4_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_0_ring_test_ring,
+ .test_ib = sdma_v4_0_ring_test_ib,
+ .insert_nop = sdma_v4_0_ring_insert_nop,
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1737,6 +1992,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
}
}
@@ -1818,7 +2077,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2098,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
adev->vm_manager.vm_pte_rqs[i] =
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f8408f88cd37..9d8df68893b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -47,6 +47,7 @@
#include "dce/dce_6_0_d.h"
#include "uvd/uvd_4_0_d.h"
#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
static const u32 tahiti_golden_registers[] =
{
@@ -1258,6 +1259,11 @@ static bool si_need_full_reset(struct amdgpu_device *adev)
return true;
}
+static bool si_need_reset_on_init(struct amdgpu_device *adev)
+{
+ return false;
+}
+
static int si_get_pcie_lanes(struct amdgpu_device *adev)
{
u32 link_width_cntl;
@@ -1323,6 +1329,52 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
+static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1339,6 +1391,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
.need_full_reset = &si_need_full_reset,
+ .get_pcie_usage = &si_get_pcie_usage,
+ .need_reset_on_init = &si_need_reset_on_init,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1382,7 +1436,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = 0;
adev->external_rev_id = (adev->rev_id == 0) ? 1 :
(adev->rev_id == 1) ? 5 : 6;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index adbaea6da0d7..f15f196684ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->ready = false;
+ ring->sched.ready = false;
}
}
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
- ring->ready = true;
+ ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 4);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
- return 0;
-}
-
static int si_dma_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
.process = si_dma_process_trap_irq,
};
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
- .process = si_dma_process_illegal_inst_irq,
-};
-
static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
- adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index da58040fdbdc..41e01a7f57a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6216,10 +6216,12 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
if (current_link_speed == AMDGPU_PCIE_GEN2)
break;
+ /* fall through */
case AMDGPU_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..8c50c9cab455 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -57,9 +57,9 @@ static void si_ih_disable_interrupts(struct amdgpu_device *adev)
static int si_ih_irq_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
si_ih_disable_interrupts(adev);
WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
@@ -76,9 +76,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
(rb_bufsz << 1) |
IH_WPTR_WRITEBACK_ENABLE;
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(IH_RB_CNTL, ih_rb_cntl);
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
@@ -100,47 +99,36 @@ static void si_ih_irq_disable(struct amdgpu_device *adev)
mdelay(1);
}
-static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+static u32 si_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
-}
-
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
+ return (wptr & ih->ptr_mask);
}
static void si_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
{
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
@@ -148,12 +136,13 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff;
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
-static void si_ih_set_rptr(struct amdgpu_device *adev)
+static void si_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(IH_RB_RPTR, ih->rptr);
}
static int si_ih_early_init(void *handle)
@@ -301,7 +290,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
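The three IH callbacks registered above now take the amdgpu_ih_ring they operate on instead of implicitly working on adev->irq.ih. A minimal sketch of how a per-ring consumer would use them, assuming the callbacks are reached through adev->irq.ih_funcs (example_drain_ih_ring and the loop body are illustrative, not the driver's real dispatch code):

/* Illustrative only: drain one IH ring using the per-ring callbacks. */
static void example_drain_ih_ring(struct amdgpu_device *adev,
				  struct amdgpu_ih_ring *ih)
{
	struct amdgpu_iv_entry entry;
	u32 wptr = adev->irq.ih_funcs->get_wptr(adev, ih);

	/* rptr/wptr are byte offsets into the ring */
	while (ih->rptr != wptr) {
		/* decode_iv fills 'entry' and advances ih->rptr */
		adev->irq.ih_funcs->decode_iv(adev, ih, &entry);
		/* ... route 'entry' to the registered IRQ source ... */
	}

	/* publish the new read pointer back to the hardware */
	adev->irq.ih_funcs->set_rptr(adev, ih);
}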
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb1a187..ed89a101f73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -43,6 +43,10 @@
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
+#include "nbio/nbio_7_0_default.h"
+#include "nbio/nbio_7_0_sh_mask.h"
+#include "nbio/nbio_7_0_smn.h"
+#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
@@ -385,14 +389,13 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
}
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
+static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
u32 i;
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
- dev_info(adev->dev, "GPU reset\n");
+ dev_info(adev->dev, "GPU mode1 reset\n");
/* disable BM */
pci_clear_master(adev->pdev);
@@ -417,6 +420,62 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
return 0;
}
+static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
+
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
+static int soc15_asic_baco_reset(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ return 0;
+}
+
+static int soc15_asic_reset(struct amdgpu_device *adev)
+{
+ int ret;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ soc15_asic_get_baco_capability(adev, &baco_reset);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ ret = soc15_asic_baco_reset(adev);
+ else
+ ret = soc15_asic_mode1_reset(adev);
+
+ return ret;
+}
+
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
u32 cntl_reg, u32 status_reg)
{
@@ -507,6 +566,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
@@ -532,10 +594,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ }
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!amdgpu_sriov_vf(adev))
@@ -557,7 +621,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
@@ -598,6 +663,68 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
/* change this when we implement soft reset */
return true;
}
+static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg)
+ return true;
+
+ return false;
+}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
@@ -613,6 +740,28 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega10_doorbell_index_init,
+ .get_pcie_usage = &soc15_get_pcie_usage,
+ .need_reset_on_init = &soc15_need_reset_on_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .set_vga_state = &soc15_vga_set_state,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega20_doorbell_index_init,
+ .get_pcie_usage = &soc15_get_pcie_usage,
+ .need_reset_on_init = &soc15_need_reset_on_init,
};
static int soc15_common_early_init(void *handle)
@@ -632,11 +781,11 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->asic_funcs = &soc15_asic_funcs;
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +809,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_VEGA12:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +832,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
+ adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,12 +855,15 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
+ adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
- adev->external_rev_id = adev->rev_id + 0x81;
+ adev->external_rev_id = adev->rev_id + 0x79;
else if (adev->pdev->device == 0x15d8)
adev->external_rev_id = adev->rev_id + 0x41;
+ else if (adev->rev_id == 1)
+ adev->external_rev_id = adev->rev_id + 0x20;
else
- adev->external_rev_id = 0x1;
+ adev->external_rev_id = adev->rev_id + 0x01;
if (adev->rev_id >= 0x8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
@@ -816,6 +970,22 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
+static void soc15_doorbell_range_init(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_ring *ring;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
+
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+}
+
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -828,6 +998,12 @@ static int soc15_common_hw_init(void *handle)
adev->nbio_funcs->init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
+ /* HW doorbell routing policy: doorbell writes that do not
+ * fall in the SDMA/IH/MM/ACV ranges are routed to CP, so the
+ * SDMA/IH/MM/ACV doorbell ranges must be initialized before
+ * CP IP block init and its ring test.
+ */
+ soc15_doorbell_range_init(adev);
return 0;
}
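The comments in soc15_get_pcie_usage() above describe the sampling protocol: select events 40 and 104, write 0x5 to reset and start the counters, wait, write 0x2 to stop them and load the shadow registers, then widen each 32-bit count with its COUNTERn_UPPER overflow field. The widening step written out as a helper (a sketch only; widen_perf_count is not a real driver function):

/* Full count = overflow bits in the upper 32 bits, counter register in
 * the lower 32 bits.
 */
static inline uint64_t widen_perf_count(uint32_t lower, uint32_t upper)
{
	return ((uint64_t)upper << 32) | lower;
}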
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad7804dc40..a66c8bfbbaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
#endif
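The two new prototypes map to the .init_doorbell_index ASIC callback added in soc15.c above; each implementation fills adev->doorbell_index with its ASIC's doorbell layout. A sketch of the shape of such a function, using only fields that appear elsewhere in this patch; the numeric values are placeholders, not the real Vega layout:

/* Sketch only: the real vega10/vega20 implementations assign the full
 * per-ASIC layout. All values below are placeholders.
 */
void example_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.ih = 0x12;			/* placeholder */
	adev->doorbell_index.uvd_vce.uvd_ring0_1 = 0x7a;	/* placeholder */
	adev->doorbell_index.uvd_vce.vce_ring0_1 = 0x7e;	/* placeholder */
	adev->doorbell_index.sdma_doorbell_range = 4;	/* placeholder */
}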
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 958b10a57073..49c262540940 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -49,14 +49,19 @@
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
+ uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
- udelay(2); \
+ if (old_ != tmp_) { \
+ loop = adev->usec_timeout; \
+ old_ = tmp_; \
+ } else \
+ udelay(1); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
- DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
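The reworked SOC15_WAIT_ON_RREG only burns the usec_timeout budget while the polled value stays the same; any change in the register restarts the countdown. The same logic written as a plain function (a sketch; example_wait_on_reg and the read_reg callback are illustrative stand-ins for the macro's RREG32 expression):

/* Open-coded equivalent of the macro's new polling logic. */
static int example_wait_on_reg(struct amdgpu_device *adev,
			       uint32_t (*read_reg)(struct amdgpu_device *),
			       uint32_t mask, uint32_t expected)
{
	uint32_t old = 0;
	uint32_t val = read_reg(adev);
	uint32_t loop = adev->usec_timeout;

	while ((val & mask) != expected) {
		if (old != val) {
			/* value is still moving: restart the timeout */
			loop = adev->usec_timeout;
			old = val;
		} else {
			udelay(1);
		}
		val = read_reg(adev);
		if (!--loop)
			return -ETIMEDOUT;
	}
	return 0;
}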
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644
index 000000000000..ac2c27b7630c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_command_xgmi {
+ TA_COMMAND_XGMI__INITIALIZE = 0x00,
+ TA_COMMAND_XGMI__GET_NODE_ID = 0x01,
+ TA_COMMAND_XGMI__GET_HIVE_ID = 0x02,
+ TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03,
+ TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+ TA_XGMI__MAX_CONNECTED_NODES = 64
+};
+
+enum ta_xgmi_status {
+ TA_XGMI_STATUS__SUCCESS = 0x00,
+ TA_XGMI_STATUS__GENERIC_FAILURE = 0x01,
+ TA_XGMI_STATUS__NULL_POINTER = 0x02,
+ TA_XGMI_STATUS__INVALID_PARAMETER = 0x03,
+ TA_XGMI_STATUS__NOT_INITIALIZED = 0x04,
+ TA_XGMI_STATUS__INVALID_NODE_NUM = 0x05,
+ TA_XGMI_STATUS__INVALID_NODE_ID = 0x06,
+ TA_XGMI_STATUS__INVALID_TOPOLOGY = 0x07,
+ TA_XGMI_STATUS__FAILED_ID_GEN = 0x08,
+ TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT = 0x09,
+ TA_XGMI_STATUS__SET_SHARING_ERROR = 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED = -1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0 = 0,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1 = 1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2 = 2,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3 = 3,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4 = 4,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5 = 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+ uint32_t status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+ uint64_t node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+ uint64_t hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+ struct ta_xgmi_cmd_get_topology_info_input get_topology_info;
+ struct ta_xgmi_cmd_set_topology_info_input set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+ struct ta_xgmi_cmd_initialize_output initialize;
+ struct ta_xgmi_cmd_get_node_id_output get_node_id;
+ struct ta_xgmi_cmd_get_hive_id_output get_hive_id;
+ struct ta_xgmi_cmd_get_topology_info_output get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_xgmi_status xgmi_status;
+ uint32_t reserved;
+ union ta_xgmi_cmd_input xgmi_in_message;
+ union ta_xgmi_cmd_output xgmi_out_message;
+};
+
+#endif //_TA_XGMI_IF_H
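The shared-memory block above is the command interface to the XGMI trusted application: the driver fills cmd_id with a ta_command_xgmi value, and the TA is expected to answer with the matching RSP_ID() value in resp_id plus a ta_xgmi_status. A sketch of a GET_HIVE_ID round trip under that assumption; submit_xgmi_ta_cmd() is a placeholder for the real submission path through the PSP:

/* Sketch: query the hive ID via the XGMI TA shared buffer. */
static int example_get_hive_id(struct ta_xgmi_shared_memory *xgmi_cmd,
			       uint64_t *hive_id)
{
	int ret;

	memset(xgmi_cmd, 0, sizeof(*xgmi_cmd));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	ret = submit_xgmi_ta_cmd(xgmi_cmd);	/* placeholder */
	if (ret)
		return ret;

	/* a completed response mirrors the command id with bit 31 set */
	if (xgmi_cmd->resp_id != RSP_ID(TA_COMMAND_XGMI__GET_HIVE_ID) ||
	    xgmi_cmd->xgmi_status != TA_XGMI_STATUS__SUCCESS)
		return -EIO;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
	return 0;
}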
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd06b5c7..a20b711a6756 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,9 +99,9 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
- int rb_bufsz;
u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
- u64 wptr_off;
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
+ int rb_bufsz;
/* disable irqs */
tonga_ih_disable_interrupts(adev);
@@ -118,10 +118,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- if (adev->irq.ih.use_bus_addr)
- WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
- else
- WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+ WREG32(mmIH_RB_BASE, ih->gpu_addr >> 8);
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
@@ -136,12 +133,8 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
/* set the writeback address whether it's enabled or not */
- if (adev->irq.ih.use_bus_addr)
- wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
- else
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
@@ -193,14 +186,12 @@ static void tonga_ih_irq_disable(struct amdgpu_device *adev)
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
-static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
+static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- if (adev->irq.ih.use_bus_addr)
- wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
- else
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
@@ -209,41 +200,13 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
* this should allow us to catchup.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
-}
-
-/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
+ return (wptr & ih->ptr_mask);
}
/**
@@ -255,16 +218,17 @@ static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
* position and also advance the position.
*/
static void tonga_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
@@ -274,7 +238,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
@@ -284,17 +248,15 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
*
* Set the IH ring buffer rptr.
*/
-static void tonga_ih_set_rptr(struct amdgpu_device *adev)
+static void tonga_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- if (adev->irq.ih.use_doorbell) {
+ if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- if (adev->irq.ih.use_bus_addr)
- adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
- else
- adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
- WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
} else {
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
}
@@ -322,7 +284,7 @@ static int tonga_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
@@ -506,7 +468,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf39fed..c4fb58667fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
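This file (and the other UVD/VCE/VCN files below) replaces the open-coded "set ring->ready, run amdgpu_ring_test_ring(), clear on failure" pattern with amdgpu_ring_test_helper(). Judging only by the call sites in this patch, the helper runs the test and records the outcome in ring->sched.ready, roughly as sketched below; the real implementation may differ in details:

/* Rough sketch inferred from the call sites in this patch. */
int example_ring_test_helper(struct amdgpu_ring *ring)
{
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(ring->adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	ring->sched.ready = !r;	/* the scheduler only picks ready rings */

	return r;
}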
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5ac9ab..52bd8a654734 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b40275e8e..c9edddf9f88a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
}
+
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
amdgpu_ring_write(ring, vmid);
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
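The emit_ib callbacks now take the amdgpu_job and read the VMID through AMDGPU_JOB_GET_VMID(job). The macro is presumably just a NULL-safe accessor so that direct submissions without a job fall back to VMID 0, along the lines of:

/* Assumed definition, matching how the call sites use it: a NULL job
 * (e.g. a direct IB test submission) falls back to VMID 0.
 */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)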
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39afcfb86..dc461df48da0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
- ring->me, ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -447,16 +430,12 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- sprintf(ring->name, "uvd<%d>", j);
+ sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
return r;
@@ -464,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+ sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
@@ -472,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
* sriov, so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
@@ -482,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
- adev->uvd.inst[i].ring.ready = false;
+ adev->uvd.inst[i].ring.sched.ready = false;
}
return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
- ring->me, ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
* Write ring commands to execute the indirect buffer
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828360d3..bed78a778e3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd39730070..6ec65cf11112 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -474,15 +473,10 @@ static int vce_v3_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +832,12 @@ out:
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c9471890bf7..aadc3e66ebd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
* so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
}
for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
+ adev->vce.ring[i].sched.ready = false;
return 0;
}
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
}
#endif
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 322e09b5b448..3dbc51f9d3b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -177,30 +177,22 @@ static int vcn_v1_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
int i, r;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ ring->sched.ready = true;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.ring_jpeg;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
@@ -222,10 +214,11 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
- if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -1095,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize wptr */
+ /* initialize JPEG wptr */
+ ring = &adev->vcn.ring_jpeg;
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
@@ -1167,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
int ret_code = 0;
+ uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- if (!ret_code) {
- int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- /* wait for read ptr to be equal to write ptr */
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
@@ -1367,10 +1369,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
* Write ring commands to execute the indirect buffer
*/
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1525,8 +1529,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
* Write enc ring commands to execute the indirect buffer
*/
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1726,10 +1734,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
* Write ring commands to execute the indirect buffer.
*/
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f9252a..6d1f804277f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -50,6 +50,22 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ adev->irq.ih1.enabled = true;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ adev->irq.ih2.enabled = true;
+ }
}
/**
@@ -71,6 +87,53 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ adev->irq.ih1.enabled = false;
+ adev->irq.ih1.rptr = 0;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ adev->irq.ih2.enabled = false;
+ adev->irq.ih2.rptr = 0;
+ }
+}
+
+static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 1 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
}
/**
@@ -86,50 +149,32 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ih_ring *ih;
int ret = 0;
- int rb_bufsz;
u32 ih_rb_cntl, ih_doorbell_rtpr;
u32 tmp;
- u64 wptr_off;
/* disable irqs */
vega10_ih_disable_interrupts(adev);
adev->nbio_funcs->ih_control(adev);
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+ ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
- if (adev->irq.ih.use_bus_addr) {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
- }
- rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
- /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
-
- if (adev->irq.msi_enabled)
- ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
+ ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
+ !!adev->irq.msi_enabled);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
/* set the writeback address whether it's enabled or not */
- if (adev->irq.ih.use_bus_addr)
- wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
- else
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
+ lower_32_bits(ih->wptr_addr));
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
+ upper_32_bits(ih->wptr_addr) & 0xFFFF);
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
@@ -137,17 +182,48 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
if (adev->irq.ih.use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
- OFFSET, adev->irq.ih.doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ adev->irq.ih.doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
ENABLE, 1);
} else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
ENABLE, 0);
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
+
+ ih = &adev->irq.ih1;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ }
+
+ ih = &adev->irq.ih2;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ }
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -191,116 +267,58 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev)
* ring buffer overflow and deal with it.
* Returns the value of the wptr.
*/
-static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
+static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- u32 wptr, tmp;
+ u32 wptr, reg, tmp;
- if (adev->irq.ih.use_bus_addr)
- wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
- else
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
-
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
-
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 32). Hopefully
- * this should allow us to catchup.
- */
- tmp = (wptr + 32) & adev->irq.ih.ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, tmp);
- adev->irq.ih.rptr = tmp;
-
- tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
- }
- return (wptr & adev->irq.ih.ptr_mask);
-}
+ wptr = le32_to_cpu(*ih->wptr_cpu);
-/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
+ /* Double check that the overflow wasn't already cleared. */
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
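+ /* Each IH ring has its own WPTR register; pick the one for this ring. */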
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ else
+ BUG();
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
+ wptr = RREG32_NO_KIQ(reg);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 32).
+ * Hopefully this allows us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
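+ /* Clear the overflow flag in this ring's own CNTL register. */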
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ else
+ BUG();
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
+ tmp = RREG32_NO_KIQ(reg);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(reg, tmp);
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
+out:
+ return (wptr & ih->ptr_mask);
}
/**
@@ -312,20 +330,21 @@ ignore_iv:
* position and also advance the position.
*/
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[8];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
- dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]);
- dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+ dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+ dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+ dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+ dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
entry->client_id = dw[0] & 0xff;
entry->src_id = (dw[0] >> 8) & 0xff;
@@ -341,9 +360,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
entry->src_data[2] = dw[6];
entry->src_data[3] = dw[7];
-
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 32;
+ ih->rptr += 32;
}
/**
@@ -353,18 +371,59 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
*
* Set the IH ring buffer rptr.
*/
-static void vega10_ih_set_rptr(struct amdgpu_device *adev)
+static void vega10_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- if (adev->irq.ih.use_doorbell) {
+ if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- if (adev->irq.ih.use_bus_addr)
- adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
- else
- adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
- WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
- } else {
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr);
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ } else if (ih == &adev->irq.ih) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+ } else if (ih == &adev->irq.ih1) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+ } else if (ih == &adev->irq.ih2) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ }
+}
+
+/**
+ * vega10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int vega10_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
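+ /* The IV's ring_id selects which secondary IH ring this WPTR update targets. */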
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+ default:
+ break;
}
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
+ .process = vega10_ih_self_irq,
+};
+
+static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}
static int vega10_ih_early_init(void *handle)
@@ -372,20 +431,37 @@ static int vega10_ih_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
vega10_ih_set_interrupt_funcs(adev);
+ vega10_ih_set_self_irq_funcs(adev);
return 0;
}
static int vega10_ih_sw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
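+ /* Register the IH self interrupt source, which delivers WPTR updates
+ * for rings 1 and 2.
+ */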
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+ if (r)
+ return r;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
if (r)
return r;
+ if (adev->asic_type == CHIP_VEGA10) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+ }
+
+ /* TODO add doorbell for IH1 & IH2 as well */
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
r = amdgpu_irq_init(adev);
@@ -397,6 +473,8 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
@@ -487,7 +565,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2bc190d..a8e92638a2e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,37 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+ adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;
+
+ /* In unit of dword doorbell */
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+ adev->doorbell_index.sdma_doorbell_range = 4;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4fcb517..0db84386252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,42 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine[0] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine[1] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine[2] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+ adev->doorbell_index.sdma_engine[3] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+ adev->doorbell_index.sdma_engine[4] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+ adev->doorbell_index.sdma_engine[5] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+ adev->doorbell_index.sdma_engine[6] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+ adev->doorbell_index.sdma_engine[7] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+ adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
+
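+ /* In unit of dword doorbell */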
+ adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+ adev->doorbell_index.sdma_doorbell_range = 20;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d35e9de..5e5b42a0744a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- r = RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ r = RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
@@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- WREG32(mmPCIE_DATA, v);
- (void)RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ WREG32_NO_KIQ(mmPCIE_DATA, v);
+ (void)RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
@@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- WREG32(mmSMC_IND_DATA_11, (v));
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -941,6 +941,69 @@ static bool vi_need_full_reset(struct amdgpu_device *adev)
}
}
+static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool vi_need_reset_on_init(struct amdgpu_device *adev)
+{
+ u32 clock_cntl, pc;
+
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ /* check if the SMC is already running */
+ clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ pc = RREG32_SMC(ixSMC_PC_C);
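+
+ /* SMC clock running and PC past the boot code means firmware is
+ * already loaded (e.g. after kexec or by the pre-OS console), so
+ * the ASIC needs a reset before it can be reinitialized.
+ */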
+ if (REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable) == 0 &&
+ pc >= 0x20100)
+ return true;
+
+ return false;
+}
+
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
@@ -955,6 +1018,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
+ .get_pcie_usage = &vi_get_pcie_usage,
+ .need_reset_on_init = &vi_need_reset_on_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1712,3 +1778,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe332269..8de0772f986c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif