path: root/drivers/gpu/drm/amd/amdgpu
author     Dave Airlie <airlied@redhat.com>  2018-09-28 09:48:35 +1000
committer  Dave Airlie <airlied@redhat.com>  2018-09-28 09:48:40 +1000
commit     87c2ee740c07f1edae9eec8bc45cb9b32a68f323 (patch)
tree       1515f53eacb86689f2f96279e51cf0053ae8a308 /drivers/gpu/drm/amd/amdgpu
parent     Merge tag 'drm/tegra/for-4.20-rc1' of git://anongit.freedesktop.org/tegra/linux into drm-next (diff)
parent     drm/scheduler: remove timeout work_struct from drm_sched_job (v3) (diff)
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
More new features and fixes for 4.20:
- Add dynamic powergating support for VCN on picasso
- Scheduler cleanup
- Vega20 support for KFD
- DC cleanups and bug fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180927184348.2696-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 166
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 177
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 331
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 7
55 files changed, 1011 insertions, 421 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6cb35e3dab30..c05b39438663 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -146,6 +146,7 @@ extern int amdgpu_cik_support;
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
@@ -408,16 +409,25 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
/*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xef
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
* Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
*/
- /* sDMA engines */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+ /* sDMA engines reserved from 0xe0 to 0xef */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
+
+ /* For vega10 SR-IOV, the SDMA doorbells must be fixed as
+ * follows to match the host driver's settings, or conflicts
+ * will occur
+ */
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
/* Interrupt handler */
AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
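
The indices above are QWORD (64-bit) doorbell indices into the doorbell BAR. A small stand-alone illustration of the index-to-offset arithmetic (plain C, purely illustrative; the macro names below are hypothetical stand-ins for the enum values in this hunk):

	#include <stdio.h>
	#include <stdint.h>

	#define DEMO_DOORBELL64_sDMA_ENGINE0        0xE0 /* new default index */
	#define DEMO_VEGA10_DOORBELL64_sDMA_ENGINE0 0xF0 /* fixed for SR-IOV parity */

	/* each 64-bit doorbell occupies 8 bytes, so the byte offset in the
	 * doorbell BAR is simply index * sizeof(uint64_t)
	 */
	static uint64_t doorbell64_byte_offset(uint32_t qword_index)
	{
		return (uint64_t)qword_index * sizeof(uint64_t);
	}

	int main(void)
	{
		printf("sDMA0 doorbell offset:        0x%llx\n",
		       (unsigned long long)doorbell64_byte_offset(DEMO_DOORBELL64_sDMA_ENGINE0));
		printf("vega10 sDMA0 doorbell offset: 0x%llx\n",
		       (unsigned long long)doorbell64_byte_offset(DEMO_VEGA10_DOORBELL64_sDMA_ENGINE0));
		return 0;
	}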
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0f9947edb12a..c31a8849e9f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -76,6 +76,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
break;
@@ -123,7 +124,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- int i;
+ int i, n;
int last_valid_bit;
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
@@ -162,7 +163,15 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_physical_address,
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- if (adev->asic_type >= CHIP_VEGA10) {
+
+ if (adev->asic_type < CHIP_VEGA10) {
+ kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ return;
+ }
+
+ n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
+
+ for (i = 0; i < n; i += 2) {
/* On SOC15 the BIF is involved in routing
* doorbells using the low 12 bits of the
* address. Communicate the assignments to
@@ -170,20 +179,31 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 * KFD. KFD uses two doorbell pages per
 * process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- gpu_resources.sdma_doorbell[0][0] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0;
- gpu_resources.sdma_doorbell[0][1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
- gpu_resources.sdma_doorbell[1][0] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1;
- gpu_resources.sdma_doorbell[1][1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
- /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
- * SDMA, IH and VCN. So don't use them for the CP.
- */
- gpu_resources.reserved_doorbell_mask = 0x1f0;
- gpu_resources.reserved_doorbell_val = 0x0f0;
+ if (adev->asic_type == CHIP_VEGA10) {
+ gpu_resources.sdma_doorbell[0][i] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+ } else {
+ gpu_resources.sdma_doorbell[0][i] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+ }
}
+ /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
+ * SDMA, IH and VCN. So don't use them for the CP.
+ */
+ gpu_resources.reserved_doorbell_mask = 0x1e0;
+ gpu_resources.reserved_doorbell_val = 0x0e0;
kgd2kfd->device_init(adev->kfd, &gpu_resources);
}
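
To see what the assignment loop above produces, here is a hedged, stand-alone rendering of the Vega20 case (n = 8; plain C with the enum values inlined as demo constants, not the driver's code):

	#include <stdio.h>

	#define DEMO_sDMA_ENGINE0 0xE0
	#define DEMO_sDMA_ENGINE1 0xE8

	int main(void)
	{
		unsigned int sdma_doorbell[2][8];
		int i, n = 8; /* Vega20 and later use 8 doorbells per engine */

		for (i = 0; i < n; i += 2) {
			/* each 64-bit doorbell is used twice: once directly and
			 * once via the +0x200 alias routed by the BIF
			 */
			sdma_doorbell[0][i]     = DEMO_sDMA_ENGINE0 + (i >> 1);
			sdma_doorbell[0][i + 1] = DEMO_sDMA_ENGINE0 + 0x200 + (i >> 1);
			sdma_doorbell[1][i]     = DEMO_sDMA_ENGINE1 + (i >> 1);
			sdma_doorbell[1][i + 1] = DEMO_sDMA_ENGINE1 + 0x200 + (i >> 1);
		}

		for (i = 0; i < n; i++)
			printf("engine0[%d]=0x%03x  engine1[%d]=0x%03x\n",
			       i, sdma_doorbell[0][i], i, sdma_doorbell[1][i]);
		return 0;
	}

For engine 0 this prints 0x0e0, 0x2e0, 0x0e1, 0x2e1, and so on, which lands exactly inside the reserved 0x0e0-0x0ff and 0x2e0-0x2ff ranges noted in the comment above.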
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 056fc6ef6c63..8e0d4f7196b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -174,7 +174,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct kgd_dev *kgd, uint64_t va, uint64_t size,
void *vm, struct kgd_mem **mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b2e45c8e2e0d..244d9834a381 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -142,7 +142,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
@@ -874,7 +874,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -882,7 +882,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
pr_err("trying to set page table base for wrong VMID\n");
return;
}
- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
}
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ea7c18ce7754..9f149914ad6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,8 +45,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-struct vi_sdma_mqd;
-
/*
* Register access functions
*/
@@ -100,7 +98,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
@@ -282,7 +280,8 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
lock_srbm(kgd, mec, pipe, 0, 0);
- WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
unlock_srbm(kgd);
@@ -834,7 +833,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -842,7 +841,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
pr_err("trying to set page table base for wrong VMID\n");
return;
}
- WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
}
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c9176537550b..42cb4c4e0929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -138,7 +138,7 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base);
+ uint64_t page_table_base);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
@@ -1013,11 +1013,10 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t page_table_base)
+ uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT |
- AMDGPU_PTE_VALID;
+ uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6ee9dc476c86..df0a059565f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1131,11 +1131,15 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
amdgpu_vm_release_compute(adev, avm);
}
-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_bo *pd = avm->root.base.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
- return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ if (adev->asic_type < CHIP_VEGA10)
+ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ return avm->pd_phys_addr;
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
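
The widening from uint32_t to uint64_t changes the units of the return value: pre-Vega10 callers receive a GPU page-frame number, while GFX9 callers receive the full byte address of the page directory. A toy demonstration (plain C; the address and the 4K page shift are assumptions for illustration only):

	#include <stdio.h>
	#include <stdint.h>

	#define DEMO_GPU_PAGE_SHIFT 12 /* assuming 4K GPU pages */

	int main(void)
	{
		uint64_t pd_phys_addr = 0x0000008000400000ULL; /* made-up PD address */

		/* pre-Vega10: hardware is programmed with a page-frame number */
		printf("legacy PFN:        0x%llx\n",
		       (unsigned long long)(pd_phys_addr >> DEMO_GPU_PAGE_SHIFT));
		/* Vega10 and later: hardware takes the full 64-bit byte address */
		printf("gfx9 byte address: 0x%llx\n",
		       (unsigned long long)pd_phys_addr);
		return 0;
	}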
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 176f28777f5e..5448cf27654e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -196,6 +196,19 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
}
/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+ mod_timer(&ring->fence_drv.fallback_timer,
+ jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
* amdgpu_fence_process - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
@@ -203,8 +216,10 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
* Checks the current fence value and calculates the last
* signalled fence value. Wakes the fence queue if the
* sequence number has increased.
+ *
+ * Returns true if fence was processed
*/
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
uint32_t seq, last_seq;
@@ -216,8 +231,12 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
+ if (del_timer(&ring->fence_drv.fallback_timer) &&
+ seq != ring->fence_drv.sync_seq)
+ amdgpu_fence_schedule_fallback(ring);
+
if (unlikely(seq == last_seq))
- return;
+ return false;
last_seq &= drv->num_fences_mask;
seq &= drv->num_fences_mask;
@@ -244,6 +263,24 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
} while (last_seq != seq);
+
+ return true;
+}
+
+/**
+ * amdgpu_fence_fallback - fallback for hardware interrupts
+ *
+ * @t: timer context used to retrieve the ring
+ *
+ * Checks for fence activity.
+ */
+static void amdgpu_fence_fallback(struct timer_list *t)
+{
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
+
+ if (amdgpu_fence_process(ring))
+ DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
/**
@@ -393,6 +430,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
+
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
@@ -468,6 +507,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
drm_sched_fini(&ring->sched);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
@@ -561,6 +601,27 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
}
/**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with the fence lock held and arms the
+ * fallback timer so the fence is eventually checked for completion
+ * even if no interrupt arrives.
+ */
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_fence *fence = to_amdgpu_fence(f);
+ struct amdgpu_ring *ring = fence->ring;
+
+ if (!timer_pending(&ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(ring);
+
+ DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+
+ return true;
+}
+
+/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -590,6 +651,7 @@ static void amdgpu_fence_release(struct dma_fence *f)
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
+ .enable_signaling = amdgpu_fence_enable_signaling,
.release = amdgpu_fence_release,
};
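
The fallback timer added in this file follows the standard kernel timer idiom. A minimal sketch of that idiom in isolation (kernel-style C; the struct and function names are simplified stand-ins, not the driver's API):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct demo_ring {
		struct timer_list fallback_timer;
		/* fence bookkeeping elided */
	};

	static void demo_fallback(struct timer_list *t)
	{
		/* recover the containing object from the timer pointer */
		struct demo_ring *ring = from_timer(ring, t, fallback_timer);

		/* poll fence state here; re-arm if fences are still outstanding */
		(void)ring;
	}

	static void demo_init(struct demo_ring *ring)
	{
		timer_setup(&ring->fallback_timer, demo_fallback, 0);
	}

	static void demo_arm(struct demo_ring *ring)
	{
		/* re-check in half a second if no interrupt arrives first */
		mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
	}

	static void demo_fini(struct demo_ring *ring)
	{
		del_timer_sync(&ring->fallback_timer); /* wait out a running handler */
	}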
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 4ed86218cef3..8af67f649660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,46 +24,21 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
-#include "amdgpu_amdkfd.h"
-
-/**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
- int r;
-
- /* Allocate ring buffer */
- if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- if (r) {
- DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
- return r;
- }
- }
- return 0;
-}
/**
* amdgpu_ih_ring_init - initialize the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
*
* Initializes the IH state and allocates a buffer
* for the IH ring buffer.
* Returns 0 for success, errors for failure.
*/
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr)
{
u32 rb_bufsz;
int r;
@@ -71,70 +46,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
- adev->irq.ih.ring_size = ring_size;
- adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
- adev->irq.ih.rptr = 0;
- adev->irq.ih.use_bus_addr = use_bus_addr;
-
- if (adev->irq.ih.use_bus_addr) {
- if (!adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
- adev->irq.ih.ring_size + 8,
- &adev->irq.ih.rb_dma_addr);
- if (adev->irq.ih.ring == NULL)
- return -ENOMEM;
- memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
- adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
- adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
- }
- return 0;
+ ih->ring_size = ring_size;
+ ih->ptr_mask = ih->ring_size - 1;
+ ih->rptr = 0;
+ ih->use_bus_addr = use_bus_addr;
+
+ if (use_bus_addr) {
+ if (ih->ring)
+ return 0;
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+ &ih->rb_dma_addr, GFP_KERNEL);
+ if (ih->ring == NULL)
+ return -ENOMEM;
+
+ memset((void *)ih->ring, 0, ih->ring_size + 8);
+ ih->wptr_offs = (ih->ring_size / 4) + 0;
+ ih->rptr_offs = (ih->ring_size / 4) + 1;
} else {
- r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+ if (r)
+ return r;
+
+ r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
if (r) {
- dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
return r;
}
- r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
if (r) {
- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
- dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, ih->rptr_offs);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
return r;
}
-
- return amdgpu_ih_ring_alloc(adev);
}
+ return 0;
}
/**
* amdgpu_ih_ring_fini - tear down the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
*
* Tears down the IH state and frees buffer
* used for the IH ring buffer.
*/
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- if (adev->irq.ih.use_bus_addr) {
- if (adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
- (void *)adev->irq.ih.ring,
- adev->irq.ih.rb_dma_addr);
- adev->irq.ih.ring = NULL;
- }
+ if (ih->use_bus_addr) {
+ if (!ih->ring)
+ return;
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ dma_free_coherent(adev->dev, ih->ring_size + 8,
+ (void *)ih->ring, ih->rb_dma_addr);
+ ih->ring = NULL;
} else {
- amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
+ amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, ih->rptr_offs);
}
}
@@ -142,56 +123,43 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
* amdgpu_ih_process - interrupt handler
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to process
*
 * Interrupt handler (VI), walks the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ void (*callback)(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih))
{
- struct amdgpu_iv_entry entry;
u32 wptr;
- if (!adev->irq.ih.enabled || adev->shutdown)
+ if (!ih->enabled || adev->shutdown)
return IRQ_NONE;
wptr = amdgpu_ih_get_wptr(adev);
restart_ih:
/* is somebody else already processing irqs? */
- if (atomic_xchg(&adev->irq.ih.lock, 1))
+ if (atomic_xchg(&ih->lock, 1))
return IRQ_NONE;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+ DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (adev->irq.ih.rptr != wptr) {
- u32 ring_index = adev->irq.ih.rptr >> 2;
-
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev)) {
- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
- continue;
- }
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev,
- (const void *) &adev->irq.ih.ring[ring_index]);
-
- entry.iv_entry = (const uint32_t *)
- &adev->irq.ih.ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-
- amdgpu_irq_dispatch(adev, &entry);
+ while (ih->rptr != wptr) {
+ callback(adev, ih);
+ ih->rptr &= ih->ptr_mask;
}
+
amdgpu_ih_set_rptr(adev);
- atomic_set(&adev->irq.ih.lock, 0);
+ atomic_set(&ih->lock, 0);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev);
- if (wptr != adev->irq.ih.rptr)
+ if (wptr != ih->rptr)
goto restart_ih;
return IRQ_HANDLED;
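
The refactored amdgpu_ih_process() reduces to a classic producer/consumer ring walk with a per-entry callback. A stripped-down, runnable model of the control flow (plain user-space C; locking, barriers, and hardware access are deliberately omitted):

	#include <stdio.h>

	struct demo_ih {
		unsigned int rptr, wptr, ptr_mask;
		unsigned int ring[16];
	};

	static void demo_callback(struct demo_ih *ih)
	{
		/* decode and dispatch one entry, then advance the read pointer */
		printf("entry 0x%x at rptr %u\n", ih->ring[ih->rptr >> 2], ih->rptr);
		ih->rptr += 4; /* one 4-byte word per entry in this toy model */
	}

	static void demo_ih_process(struct demo_ih *ih,
				    void (*callback)(struct demo_ih *ih))
	{
		/* walk the ring until the read pointer catches the write pointer */
		while (ih->rptr != ih->wptr) {
			callback(ih);
			ih->rptr &= ih->ptr_mask; /* wrap at the end of the ring */
		}
	}

	int main(void)
	{
		struct demo_ih ih = { .rptr = 0, .wptr = 12, .ptr_mask = 63,
				      .ring = { 0xaa, 0xbb, 0xcc } };

		demo_ih_process(&ih, demo_callback);
		return 0;
	}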
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 0d5b3f5201d2..9ce8c93ec19b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,12 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
-#include "soc15_ih_clientid.h"
-
struct amdgpu_device;
-
-#define AMDGPU_IH_CLIENTID_LEGACY 0
-#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+struct amdgpu_iv_entry;
/*
* R6xx+ IH ring
@@ -51,22 +47,6 @@ struct amdgpu_ih_ring {
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
};
-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
-
-struct amdgpu_iv_entry {
- unsigned client_id;
- unsigned src_id;
- unsigned ring_id;
- unsigned vmid;
- unsigned vmid_src;
- uint64_t timestamp;
- unsigned timestamp_src;
- unsigned pasid;
- unsigned pasid_src;
- unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
- const uint32_t *iv_entry;
-};
-
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
@@ -82,9 +62,11 @@ struct amdgpu_ih_funcs {
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr);
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
-int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr);
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ void (*callback)(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih));
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b927e8798534..52c17f6219a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -51,6 +51,7 @@
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
#include <linux/pm_runtime.h>
@@ -123,7 +124,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
int r;
spin_lock_irqsave(&adev->irq.lock, irqflags);
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -147,6 +148,34 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
+ * amdgpu_irq_callback - callback from the IH ring
+ *
+ * @adev: amdgpu device pointer
+ * @ih: amdgpu ih ring
+ *
+ * Callback from IH ring processing to handle the entry at the current position
+ * and advance the read pointer.
+ */
+static void amdgpu_irq_callback(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev))
+ return;
+
+ /* Before dispatching irq to IP blocks, send it to amdkfd */
+ amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
+
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ amdgpu_irq_dispatch(adev, &entry);
+}
+
+/**
* amdgpu_irq_handler - IRQ handler
*
* @irq: IRQ number (unused)
@@ -163,7 +192,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
struct amdgpu_device *adev = dev->dev_private;
irqreturn_t ret;
- ret = amdgpu_ih_process(adev);
+ ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
@@ -273,7 +302,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
cancel_work_sync(&adev->reset_work);
}
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -313,7 +342,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source)
{
- if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
return -EINVAL;
if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
@@ -367,7 +396,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
trace_amdgpu_iv(entry);
- if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
return;
}
@@ -440,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 3375ad778edc..f6ce171cb8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -25,19 +25,38 @@
#define __AMDGPU_IRQ_H__
#include <linux/irqdomain.h>
+#include "soc15_ih_clientid.h"
#include "amdgpu_ih.h"
-#define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_SRC_ID 0x100
#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
+#define AMDGPU_IRQ_CLIENTID_LEGACY 0
+#define AMDGPU_IRQ_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+
+#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW 4
+
struct amdgpu_device;
-struct amdgpu_iv_entry;
enum amdgpu_interrupt_state {
AMDGPU_IRQ_STATE_DISABLE,
AMDGPU_IRQ_STATE_ENABLE,
};
+struct amdgpu_iv_entry {
+ unsigned client_id;
+ unsigned src_id;
+ unsigned ring_id;
+ unsigned vmid;
+ unsigned vmid_src;
+ uint64_t timestamp;
+ unsigned timestamp_src;
+ unsigned pasid;
+ unsigned pasid_src;
+ unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
+ const uint32_t *iv_entry;
+};
+
struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
@@ -63,7 +82,7 @@ struct amdgpu_irq {
bool installed;
spinlock_t lock;
/* interrupt sources */
- struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
+ struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
/* status, etc. */
bool msi_enabled; /* msi enabled */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8c334fc808c2..18d989e0e362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1976,6 +1976,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
+ uint64_t value64;
uint32_t query = 0;
int size;
@@ -2014,6 +2015,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "GPU Load: %u %%\n", value);
seq_printf(m, "\n");
+ /* SMC feature mask */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
+ seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
+
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 44fc665e4577..4caa301ce454 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -77,6 +77,7 @@ struct amdgpu_fence_driver {
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
+ struct timer_list fallback_timer;
unsigned num_fences_mask;
spinlock_t lock;
struct dma_fence **fences;
@@ -96,7 +97,7 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
+bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 2e87414422f9..e9bf70e2ac51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(amdgpu_iv,
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
+ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
__entry->client_id, __entry->src_id,
__entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pasid,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a73674f9a0f5..2a2eb0143f48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -36,6 +36,7 @@
#include "soc15_common.h"
#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -212,18 +213,158 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
+ struct dpg_pause_state *new_state)
+{
+ int ret_code;
+ uint32_t reg_data = 0;
+ uint32_t reg_data2 = 0;
+ struct amdgpu_ring *ring;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG non-jpeg */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_enc[0];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_enc[1];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr) | 0x80000000);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg non-jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.fw_based = new_state->fw_based;
+ }
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* Make sure JRBC Snoop is disabled before sending the pause */
+ reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+ reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
+
+ /* pause DPG jpeg */
+ reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr) | 0x80000000);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.jpeg = new_state->jpeg;
+ }
+
+ return 0;
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vcn.idle_work.work);
- unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
- unsigned i;
+ unsigned int fences = 0;
+ unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
}
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (fences)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ }
+
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
@@ -250,6 +391,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
}
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = adev->vcn.pause_state.fw_based;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.jpeg = adev->vcn.pause_state.jpeg;
+
+ amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ }
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -264,7 +421,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
@@ -272,11 +429,11 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
amdgpu_ring_write(ring,
- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
@@ -616,7 +773,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+ WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r) {
@@ -626,12 +783,12 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
}
amdgpu_ring_write(ring,
- PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+ PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
@@ -665,7 +822,7 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+ ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
ib->ptr[1] = 0xDEADBEEF;
for (i = 2; i < 16; i += 2) {
ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
@@ -714,7 +871,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+ tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
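
All of the ring tests in this file follow the same poison/emit/poll pattern against a scratch register. A hedged sketch of that pattern (the register name and every demo_* helper below are hypothetical stand-ins, not the driver's API):

	static int demo_ring_test(struct demo_ring *ring)
	{
		unsigned int i, tmp;

		demo_wreg(DEMO_SCRATCH9, 0xCAFEDEAD);  /* poison the scratch reg */

		/* queue a packet that writes the magic value, then kick the ring */
		demo_ring_emit_reg_write(ring, DEMO_SCRATCH9, 0xDEADBEEF);
		demo_ring_commit(ring);

		/* poll until the GPU has executed the write, or time out */
		for (i = 0; i < DEMO_USEC_TIMEOUT; i++) {
			tmp = demo_rreg(DEMO_SCRATCH9);
			if (tmp == 0xDEADBEEF)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}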
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0b0b8638d73f..0b88a4672da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,6 +56,16 @@ enum engine_status_constants {
UVD_STATUS__RBC_BUSY = 0x1,
};
+enum internal_dpg_state {
+ VCN_DPG_STATE__UNPAUSE = 0,
+ VCN_DPG_STATE__PAUSE,
+};
+
+struct dpg_pause_state {
+ enum internal_dpg_state fw_based;
+ enum internal_dpg_state jpeg;
+};
+
struct amdgpu_vcn {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
@@ -69,6 +79,8 @@ struct amdgpu_vcn {
struct amdgpu_ring ring_jpeg;
struct amdgpu_irq_src irq;
unsigned num_enc_rings;
+ enum amd_powergating_state cur_state;
+ struct dpg_pause_state pause_state;
};
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
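
For context, the new dpg_pause_state is consumed by amdgpu_vcn_pause_dpg_mode() in amdgpu_vcn.c above. A hedged illustration of how a request might be derived from ring activity, mirroring the idle-work handler (the helper below is a stand-in, not driver code):

	static void demo_build_pause_state(struct dpg_pause_state *state,
					   unsigned int enc_fences,
					   unsigned int jpeg_fences)
	{
		/* keep the firmware-based path powered while encode work remains */
		state->fw_based = enc_fences ? VCN_DPG_STATE__PAUSE
					     : VCN_DPG_STATE__UNPAUSE;
		/* and likewise for the JPEG block */
		state->jpeg = jpeg_fences ? VCN_DPG_STATE__PAUSE
					  : VCN_DPG_STATE__UNPAUSE;
	}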
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index d2469453dca2..79220a91abe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6277,12 +6277,12 @@ static int ci_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 44d10c2172f6..b5775c6a857b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -276,7 +276,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -318,7 +318,7 @@ static int cik_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -332,7 +332,7 @@ static int cik_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -468,8 +468,7 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cik_ih_funcs;
+ adev->irq.ih_funcs = &cik_ih_funcs;
}
const struct amdgpu_ip_block_version cik_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 154b1499b07e..b918c8886b75 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -970,19 +970,19 @@ static int cik_sdma_sw_init(void *handle)
}
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1370,10 +1370,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
@@ -1389,15 +1387,13 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 960c29e17da6..df5ac4d85a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -255,7 +255,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -297,7 +297,7 @@ static int cz_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -311,7 +311,7 @@ static int cz_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -449,8 +449,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cz_ih_funcs;
+ adev->irq.ih_funcs = &cz_ih_funcs;
}
const struct amdgpu_ip_block_version cz_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 3916aa6cc4ec..4cfecdce29a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2746,19 +2746,19 @@ static int dce_v10_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
@@ -3570,8 +3570,7 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v10_0_display_funcs;
+ adev->mode_info.funcs = &dce_v10_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 4ffb612a4e53..7c868916d90f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2867,19 +2867,19 @@ static int dce_v11_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
@@ -3702,8 +3702,7 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
+ adev->mode_info.funcs = &dce_v11_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 480c5348a14f..17eaaba36017 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2616,19 +2616,19 @@ static int dce_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
@@ -3376,8 +3376,7 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v6_0_display_funcs;
+ adev->mode_info.funcs = &dce_v6_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 797196476c94..8c0576978d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2643,19 +2643,19 @@ static int dce_v8_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
@@ -3458,8 +3458,7 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v8_0_display_funcs;
+ adev->mode_info.funcs = &dce_v8_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 15257634a53a..fdace004544d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -372,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
if (r)
return r;
@@ -649,8 +649,7 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_virtual_display_funcs;
+ adev->mode_info.funcs = &dce_virtual_display_funcs;
}
static int dce_virtual_pageflip(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index de184a886057..d76eb27945dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1552,7 +1552,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev)
adev->gfx.config.double_offchip_lds_buf = 0;
}
-static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
@@ -3094,15 +3094,15 @@ static int gfx_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -3175,7 +3175,7 @@ static int gfx_v6_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v6_0_gpu_init(adev);
+ gfx_v6_0_constants_init(adev);
r = gfx_v6_0_rlc_resume(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fc39ebbc9d9f..0e72bc09939a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1886,14 +1886,14 @@ static void gfx_v7_0_config_init(struct amdgpu_device *adev)
}
/**
- * gfx_v7_0_gpu_init - setup the 3D engine
+ * gfx_v7_0_constants_init - setup the 3D engine
*
* @adev: amdgpu_device pointer
*
- * Configures the 3D engine and tiling configuration
- * registers so that the 3D engine is usable.
+ * Initializes the gfx constants such as the 3D engine, tiling configuration
+ * registers, maximum number of quad pipes, render backends, etc.
*/
-static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
{
u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
u32 tmp;
@@ -4516,18 +4516,18 @@ static int gfx_v7_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -4624,7 +4624,7 @@ static int gfx_v7_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v7_0_gpu_init(adev);
+ gfx_v7_0_constants_init(adev);
/* init rlc */
r = gfx_v7_0_rlc_resume(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 470dc80f4fe7..2aeef2bb93a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2049,35 +2049,35 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
/* Add CP EDC/ECC irq */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* SQ interrupts. */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
&adev->gfx.sq_irq);
if (r) {
DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
@@ -3835,7 +3835,7 @@ static void gfx_v8_0_config_init(struct amdgpu_device *adev)
}
}
-static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp, sh_static_mem_cfg;
int i;
@@ -4208,31 +4208,11 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- u32 tmp;
gfx_v8_0_rlc_stop(adev);
-
- /* disable CG */
- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
- tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
- RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
- WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
- if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS12 ||
- adev->asic_type == CHIP_VEGAM) {
- tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
- tmp &= ~0x3;
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
- }
-
- /* disable PG */
- WREG32(mmRLC_PG_CNTL, 0);
-
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
-
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy rlc firmware loading */
r = gfx_v8_0_rlc_load_microcode(adev);
@@ -5039,7 +5019,7 @@ static int gfx_v8_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
- gfx_v8_0_gpu_init(adev);
+ gfx_v8_0_constants_init(adev);
r = gfx_v8_0_rlc_resume(adev);
if (r)
@@ -5080,6 +5060,55 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
return r;
}
+static bool gfx_v8_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
+ || RREG32(mmGRBM_STATUS2) != 0x8)
+ return false;
+ else
+ return true;
+}
+
+static bool gfx_v8_0_rlc_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (RREG32(mmGRBM_STATUS2) != 0x8)
+ return false;
+ else
+ return true;
+}
+
+static int gfx_v8_0_wait_for_rlc_idle(void *handle)
+{
+ unsigned int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v8_0_rlc_is_idle(handle))
+ return 0;
+
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gfx_v8_0_wait_for_idle(void *handle)
+{
+ unsigned int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v8_0_is_idle(handle))
+ return 0;
+
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -5098,9 +5127,16 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- gfx_v8_0_cp_enable(adev, false);
- gfx_v8_0_rlc_stop(adev);
-
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ if (!gfx_v8_0_wait_for_idle(adev))
+ gfx_v8_0_cp_enable(adev, false);
+ else
+ pr_err("cp is busy, skip halt cp\n");
+ if (!gfx_v8_0_wait_for_rlc_idle(adev))
+ gfx_v8_0_rlc_stop(adev);
+ else
+ pr_err("rlc is busy, skip halt rlc\n");
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
@@ -5121,30 +5157,6 @@ static int gfx_v8_0_resume(void *handle)
return r;
}
-static bool gfx_v8_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
- return false;
- else
- return true;
-}
-
-static int gfx_v8_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(handle))
- return 0;
-
- udelay(1);
- }
- return -ETIMEDOUT;
-}
-
static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
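gfx_v8_0_hw_fini() now brackets the teardown in RLC safe mode and only halts the CP and RLC once they report idle, which is why gfx_v8_0_is_idle()/wait_for_idle() move above hw_fini and gain RLC-specific variants. The poll-with-timeout idiom, modeled as a standalone program with a faked status register:

#include <stdio.h>
#include <errno.h>

static unsigned fake_grbm_status2 = 0x8;  /* 0x8 == idle in this model */

static int rlc_is_idle(void) { return fake_grbm_status2 == 0x8; }

static int wait_for_rlc_idle(unsigned usec_timeout)
{
        unsigned i;

        for (i = 0; i < usec_timeout; i++) {
                if (rlc_is_idle())
                        return 0;
                /* the kernel udelay(1)s here; omitted in this model */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait_for_rlc_idle: %d\n", wait_for_rlc_idle(100));
        return 0;
}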
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f369d9603435..261bb051b14d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1847,7 +1847,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
}
-static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
int i;
@@ -3235,7 +3235,7 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_init_golden_registers(adev);
- gfx_v9_0_gpu_init(adev);
+ gfx_v9_0_constants_init(adev);
r = gfx_v9_0_csb_vram_pin(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 65f58ebcf835..ceb7847b504f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -82,7 +82,8 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1);
+ max((adev->gmc.vram_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
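This gfxhub hunk (and the matching mmhub one below) reorders the system-aperture computation: the old form bumped whichever end address was larger by one 256KB granule, overshooting when the AGP aperture already ends above VRAM; the new form adds the extra granule only on the VRAM side before taking the max. A userspace sketch with made-up end addresses showing the one-granule difference:

#include <stdio.h>
#include <stdint.h>

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
        uint64_t vram_end = 0x1FFFFFFFFULL;  /* example: 8GB - 1 */
        uint64_t agp_end  = 0x3FFFFFFFFULL;  /* example: AGP ends above VRAM */

        /* values are in 256KB granules after the >> 18 */
        uint64_t old_val = (max64(vram_end, agp_end) >> 18) + 0x1;
        uint64_t new_val = max64((vram_end >> 18) + 0x1, agp_end >> 18);

        /* old overshoots the AGP end by one granule: 0x10000 vs 0xffff */
        printf("old=0x%llx new=0x%llx\n",
               (unsigned long long)old_val, (unsigned long long)new_val);
        return 0;
}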
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 4411463ca719..e1c2b4e9c7b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -859,11 +859,11 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1180,8 +1180,7 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae776ce9a415..910c4ce19cb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -991,11 +991,11 @@ static int gmc_v7_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1388,8 +1388,7 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 53ae49b8bde8..1d3265c97b70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1095,11 +1095,11 @@ static int gmc_v8_0_sw_init(void *handle)
adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1733,8 +1733,7 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index aad3c7c5fb3a..f35d7a554ad5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -593,8 +593,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gmc.gmc_funcs == NULL)
- adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+ adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static int gmc_v9_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 842c4b677b4d..cf0fc61aebe6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -255,7 +255,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -297,7 +297,7 @@ static int iceland_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -311,7 +311,7 @@ static int iceland_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -447,8 +447,7 @@ static const struct amdgpu_ih_funcs iceland_ih_funcs = {
static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &iceland_ih_funcs;
+ adev->irq.ih_funcs = &iceland_ih_funcs;
}
const struct amdgpu_ip_block_version iceland_ih_ip_block =
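amdgpu_ih_ring_init()/amdgpu_ih_ring_fini() now take the struct amdgpu_ih_ring to operate on instead of implicitly using adev->irq.ih, groundwork for ASICs with more than one IH ring (cf. the large amdgpu_ih.c/amdgpu_ih.h entries in the diffstat). A minimal model of that refactor; the types here are illustrative only:

#include <stdlib.h>

struct ih_ring { unsigned *ring; unsigned size; int use_bus_addr; };
struct device  { struct ih_ring ih, ih1; };  /* several rings per device */

static int ih_ring_init(struct device *dev, struct ih_ring *ih,
                        unsigned size, int use_bus_addr)
{
        (void)dev;
        ih->ring = calloc(size, sizeof(*ih->ring));
        ih->size = size;
        ih->use_bus_addr = use_bus_addr;
        return ih->ring ? 0 : -1;
}

static void ih_ring_fini(struct device *dev, struct ih_ring *ih)
{
        (void)dev;
        free(ih->ring);
        ih->ring = NULL;
}

int main(void)
{
        struct device dev;

        ih_ring_init(&dev, &dev.ih, 64 * 1024, 0);  /* as in iceland_ih_sw_init */
        ih_ring_fini(&dev, &dev.ih);
        return 0;
}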
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index cb79a93c2eb7..d0e478f43443 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2995,12 +2995,12 @@ static int kv_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 80698b5ffa4a..14649f8475f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -100,7 +100,8 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1);
+ max((adev->gmc.vram_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 842567b53df5..64e875d528dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -580,11 +580,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c403bdf8ad70..cd781abc4953 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -898,19 +898,19 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1296,10 +1296,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
@@ -1315,15 +1313,13 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
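With the NULL guard gone, sdma_v2_4_set_vm_pte_funcs() unconditionally installs the SDMA PTE funcs and collects one kernel-priority scheduler run queue per SDMA instance into vm_manager.vm_pte_rqs; sdma_v3_0, sdma_v4_0, and si_dma below get the same treatment. A sketch of the wiring, with placeholder structures rather than the DRM scheduler's real API:

#include <stdio.h>

#define PRIO_KERNEL 0
#define NUM_PRIOS   3
#define MAX_SDMA    2

struct sched { int sched_rq[NUM_PRIOS]; };
struct sdma_instance { struct sched sched; };

int main(void)
{
        struct sdma_instance inst[MAX_SDMA];
        int *vm_pte_rqs[MAX_SDMA];
        unsigned i, num = MAX_SDMA;

        /* one kernel-priority run queue per SDMA instance */
        for (i = 0; i < num; i++)
                vm_pte_rqs[i] = &inst[i].sched.sched_rq[PRIO_KERNEL];

        printf("wired %u page-table run queues, first at %p\n",
               num, (void *)vm_pte_rqs[0]);
        return 0;
}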
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2677d6a1bf42..6d5c8ac64874 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1177,19 +1177,19 @@ static int sdma_v3_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -1736,10 +1736,8 @@ static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
@@ -1755,15 +1753,13 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2ea1f0d8f5be..a3e2ed15fff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1320,9 +1320,15 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+ if (adev->asic_type == CHIP_VEGA10)
+ ring->doorbell_index = (i == 0) ?
+ (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) // get DWORD offset
+ : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+ else
+ ring->doorbell_index = (i == 0) ?
+ (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) // get DWORD offset
+ : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
+
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
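Because Vega20 reuses the SOC15 SDMA code with a different doorbell layout, Vega10 now selects its own AMDGPU_VEGA10_DOORBELL64_* indices rather than sharing the generic ones. The `<< 1` turns a 64-bit doorbell slot number into a DWORD offset, since each slot spans two DWORDs; a trivial sketch with an invented slot value:

#include <stdio.h>

#define DOORBELL64_sDMA_ENGINE0 0xE0  /* illustrative slot number only */

int main(void)
{
        unsigned slot = DOORBELL64_sDMA_ENGINE0;
        unsigned dword_index = slot << 1;  /* two DWORDs per 64-bit slot */

        printf("slot 0x%x -> dword index 0x%x\n", slot, dword_index);
        return 0;
}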
@@ -1801,10 +1807,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1820,15 +1824,13 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index fafaf259b17b..d4ceaf440f26 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -502,12 +502,12 @@ static int si_dma_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* DMA0 trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
if (r)
return r;
/* DMA1 trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
if (r)
return r;
@@ -863,10 +863,8 @@ static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &si_dma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
@@ -882,15 +880,13 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- }
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version si_dma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 1de96995e690..da58040fdbdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7687,11 +7687,11 @@ static int si_dpm_sw_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
+ ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 60dad63098a2..b3d7d9f83202 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -142,7 +142,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -170,7 +170,7 @@ static int si_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -182,7 +182,7 @@ static int si_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
}
@@ -308,8 +308,7 @@ static const struct amdgpu_ih_funcs si_ih_funcs = {
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &si_ih_funcs;
+ adev->irq.ih_funcs = &si_ih_funcs;
}
const struct amdgpu_ip_block_version si_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 138c4810a3de..fb26039beeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -739,7 +739,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN;
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index f5d602540673..958b10a57073 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -57,13 +57,33 @@
loop--; \
if (!loop) { \
DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
- inst, #reg, expected_value, (tmp_ & (mask))); \
+ inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
} \
} \
} while (0)
+#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); })
+
+#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+ do { \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
#endif
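The new RREG32/WREG32_SOC15_DPG_MODE macros tunnel register accesses through the UVD_DPG_LMA_{MASK,CTL,DATA} window, which is how VCN registers are reached while dynamic power gating keeps the block mostly off: program the mask, then a control word carrying the target offset, direction, and SRAM select, then move the payload through DATA. A userspace model of that indirect access; the bit layout below is invented for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t lma_mask, lma_ctl, lma_data;
static uint32_t backing[256];           /* fake register file */

#define CTL_WRITE      (1u << 0)        /* direction bit (invented) */
#define CTL_ADDR_SHIFT 8                /* target offset field (invented) */

static void lma_commit(void)
{
        uint32_t addr = lma_ctl >> CTL_ADDR_SHIFT;

        if (lma_ctl & CTL_WRITE)
                backing[addr] = (backing[addr] & ~lma_mask) | (lma_data & lma_mask);
        else
                lma_data = backing[addr] & lma_mask;
}

static void wreg_dpg(uint32_t reg, uint32_t val, uint32_t mask)
{
        lma_data = val;                           /* payload first */
        lma_mask = mask;                          /* then the mask */
        lma_ctl  = (reg << CTL_ADDR_SHIFT) | CTL_WRITE;  /* commit */
        lma_commit();
}

int main(void)
{
        wreg_dpg(0x10, 0xDEAD, 0xFFFF);
        printf("reg 0x10 = 0x%x\n", (unsigned)backing[0x10]);
        return 0;
}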
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 52853d8a8fdd..3abffd06b5c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -266,7 +266,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
@@ -317,7 +317,7 @@ static int tonga_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
if (r)
return r;
@@ -334,7 +334,7 @@ static int tonga_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
amdgpu_irq_remove_domain(adev);
return 0;
@@ -513,8 +513,7 @@ static const struct amdgpu_ih_funcs tonga_ih_funcs = {
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &tonga_ih_funcs;
+ adev->irq.ih_funcs = &tonga_ih_funcs;
}
const struct amdgpu_ip_block_version tonga_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8a926d1df939..1fc17bf39fed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -108,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 50248059412e..fde6ad5ac9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -105,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 6ae82cc2e55e..8ef4a5392112 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -393,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 7eaa54ba016b..ea28828360d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -417,7 +417,7 @@ static int vce_v2_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c8390f9adfd6..6dbd39730070 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -423,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
int r, i;
/* VCE */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 2664bb2c47c3..63d7f97e81b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -198,7 +198,8 @@ static int vcn_v1_0_hw_init(void *handle)
done:
if (!r)
- DRM_INFO("VCN decode and encode initialized successfully.\n");
+ DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
@@ -266,13 +267,13 @@ static int vcn_v1_0_resume(void *handle)
}
/**
- * vcn_v1_0_mc_resume - memory controller programming
+ * vcn_v1_0_mc_resume_spg_mode - memory controller programming
*
* @adev: amdgpu_device pointer
*
* Let the VCN memory controller know its offsets
*/
-static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
@@ -319,6 +320,65 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config);
}
+static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
+{
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t offset;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
+ 0xFFFFFFFF, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+ offset = size;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
+ }
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE,
+ 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
+ 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+ AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40),
+ 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
+}
+
/**
* vcn_v1_0_disable_clock_gating - disable VCN clock gating
*
@@ -519,6 +579,62 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}
+static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
+{
+ uint32_t reg_data = 0;
+
+ /* disable JPEG CGC */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+
+ /* enable sw clock gating control */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ else
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+
+ reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__SCPU_MODE_MASK);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
+}
+
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
@@ -614,7 +730,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
*
* Setup and start the VCN block
*/
-static int vcn_v1_0_start(struct amdgpu_device *adev)
+static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
uint32_t rb_bufsz, tmp;
@@ -628,7 +744,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
/* disable clock gating */
vcn_v1_0_disable_clock_gating(adev);
- vcn_v1_0_mc_resume(adev);
+ vcn_v1_0_mc_resume_spg_mode(adev);
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -799,6 +915,170 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+ uint32_t rb_bufsz, tmp, reg_data;
+ uint32_t lmi_swap_cntl;
+
+ /* disable byte swapping */
+ lmi_swap_cntl = 0;
+
+ vcn_1_0_enable_static_power_gating(adev);
+
+ /* enable dynamic power gating mode */
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+ reg_data |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ reg_data |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data);
+
+ /* enable clock gating */
+ vcn_v1_0_clock_gating_dpg_mode(adev, 0);
+
+ /* enable VCPU clock */
+ reg_data = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ reg_data |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ reg_data |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, reg_data, 0xFFFFFFFF, 0);
+
+ /* disable interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+ /* stall UMC and register bus before resetting VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
+ 0xFFFFFFFF, 0);
+
+ /* initialize VCN memory controller */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ 0x00100000L, 0xFFFFFFFF, 0);
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+#endif
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
+
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_ALU, 0, 0xFFFFFFFF, 0);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, 0x88, 0xFFFFFFFF, 0);
+
+ vcn_v1_0_mc_resume_dpg_mode(adev);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 0xFFFFFFFF, 0);
+
+ /* enable VCPU clock */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL,
+ UVD_VCPU_CNTL__CLK_EN_MASK, 0xFFFFFFFF, 0);
+
+ /* enable UMC */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+ 0, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ /* boot up the VCPU */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), 0);
+
+ vcn_v1_0_clock_gating_dpg_mode(adev, 1);
+ /* setup mmUVD_LMI_CTRL */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+ (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L), 0xFFFFFFFF, 1);
+
+ tmp = adev->gfx.config.gb_addr_config;
+ /* setup VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+
+ /* enable System Interrupt for JRBC */
+ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
+ UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
+
+ /* force RBC into idle state */
+ rb_bufsz = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* set the write pointer delay */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for the ring buffer */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
+
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
+ ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+ /* initialize wptr */
+ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+ /* copy patch commands to the jpeg ring */
+ vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+ (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
+ return 0;
+}
+
+static int vcn_v1_0_start(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ r = vcn_v1_0_start_dpg_mode(adev);
+ else
+ r = vcn_v1_0_start_spg_mode(adev);
+ return r;
+}
+
/**
* vcn_v1_0_stop - stop VCN block
*
@@ -806,7 +1086,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
*
* stop the VCN block
*/
-static int vcn_v1_0_stop(struct amdgpu_device *adev)
+static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
/* force RBC into idle state */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -836,6 +1116,33 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
+{
+ int ret_code;
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int vcn_v1_0_stop(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ r = vcn_v1_0_stop_dpg_mode(adev);
+ else
+ r = vcn_v1_0_stop_spg_mode(adev);
+
+ return r;
+}
+
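The legacy bring-up/teardown paths are renamed *_spg_mode, and thin vcn_v1_0_start()/vcn_v1_0_stop() dispatchers pick SPG or the new DPG variants based on the AMD_PG_SUPPORT_VCN_DPG bit set for Picasso in the soc15.c hunk above. A sketch of the flag-gated dispatch, with placeholder constants:

#include <stdio.h>

#define PG_SUPPORT_VCN_DPG (1u << 0)  /* placeholder for the real flag */

static int start_dpg_mode(void) { puts("DPG start"); return 0; }
static int start_spg_mode(void) { puts("SPG start"); return 0; }

static int vcn_start(unsigned pg_flags)
{
        return (pg_flags & PG_SUPPORT_VCN_DPG) ? start_dpg_mode()
                                               : start_spg_mode();
}

int main(void)
{
        vcn_start(PG_SUPPORT_VCN_DPG);  /* Picasso-like configuration */
        vcn_start(0);                   /* everything else */
        return 0;
}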
static bool vcn_v1_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1633,12 +1940,20 @@ static int vcn_v1_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
+ int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (state == adev->vcn.cur_state)
+ return 0;
+
if (state == AMD_PG_STATE_GATE)
- return vcn_v1_0_stop(adev);
+ ret = vcn_v1_0_stop(adev);
else
- return vcn_v1_0_start(adev);
+ ret = vcn_v1_0_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+ return ret;
}
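vcn_v1_0_set_powergating_state() now caches adev->vcn.cur_state: a request matching the current state returns immediately, and the cached state only advances when the start/stop actually succeeded, so repeated gate/ungate requests no longer restart the block. A standalone sketch of the idempotent setter, with simplified types:

#include <stdio.h>

enum pg_state { PG_STATE_GATE, PG_STATE_UNGATE };

static enum pg_state cur_state = PG_STATE_GATE;

static int vcn_stop(void)  { puts("stop");  return 0; }
static int vcn_start(void) { puts("start"); return 0; }

static int set_powergating_state(enum pg_state state)
{
        int ret;

        if (state == cur_state)          /* nothing to do */
                return 0;

        ret = (state == PG_STATE_GATE) ? vcn_stop() : vcn_start();
        if (!ret)
                cur_state = state;       /* only advance on success */
        return ret;
}

int main(void)
{
        set_powergating_state(PG_STATE_UNGATE);  /* starts the block */
        set_powergating_state(PG_STATE_UNGATE);  /* no-op */
        return 0;
}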
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index acbe5a770207..a99f71797aa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -380,7 +380,7 @@ static int vega10_ih_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
if (r)
return r;
@@ -397,7 +397,7 @@ static int vega10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
}
@@ -494,8 +494,7 @@ static const struct amdgpu_ih_funcs vega10_ih_funcs = {
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &vega10_ih_funcs;
+ adev->irq.ih_funcs = &vega10_ih_funcs;
}
const struct amdgpu_ip_block_version vega10_ih_ip_block =