Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  68
1 file changed, 12 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 15ae4bc9c072..2d4770e173dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -504,41 +504,6 @@ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-/**
- * sdma_v2_4_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
-{
- const struct sdma_firmware_header_v1_0 *hdr;
- const __le32 *fw_data;
- u32 fw_size;
- int i, j;
-
- /* halt the MEs */
- sdma_v2_4_enable(adev, false);
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (!adev->sdma.instance[i].fw)
- return -EINVAL;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- amdgpu_ucode_print_sdma_hdr(&hdr->header);
- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- fw_data = (const __le32 *)
- (adev->sdma.instance[i].fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
- for (j = 0; j < fw_size; j++)
- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
- }
-
- return 0;
-}
/**
* sdma_v2_4_start - setup and start the async dma engines
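For reference, the removed sdma_v2_4_load_microcode() drives each engine's microcode RAM through an indexed register pair. Below is a condensed restatement of that direct-load path for one instance, using the same locals as the removed code; the helper name is hypothetical, and the auto-incrementing behaviour of the DATA register is an assumption read off the shape of the loop:

	/* Hypothetical helper, condensed from the removed function above. */
	static void sdma_v2_4_write_ucode(struct amdgpu_device *adev, int i,
					  const __le32 *fw_data, u32 fw_size)
	{
		u32 j;

		/* UCODE_ADDR selects the dword offset inside the engine's
		 * ucode RAM; each UCODE_DATA write is assumed to store one
		 * dword there and advance the offset. */
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i],
			       le32_to_cpup(fw_data++));
		/* The removed code finished by writing the firmware version
		 * back through the ADDR register. */
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i],
		       adev->sdma.instance[i].fw_version);
	}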
@@ -552,13 +517,6 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- }
-
/* halt the engine before programing */
sdma_v2_4_enable(adev, false);
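The hunk above drops the only caller of that path from sdma_v2_4_start(): the MMIO upload was performed here only when the driver-wide load-type selector asked for direct loading, while the SMU and PSP paths hand the images to a separate loader. For orientation, a sketch of that selector as defined in amdgpu_ucode.h of this era (quoted from memory, so treat it as an assumption rather than part of this diff):

	enum amdgpu_firmware_load_type {
		AMDGPU_FW_LOAD_DIRECT = 0,	/* each IP block uploads its own ucode via MMIO */
		AMDGPU_FW_LOAD_SMU,		/* images are handed to the SMU */
		AMDGPU_FW_LOAD_PSP,		/* images are handed to the PSP */
	};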
@@ -898,19 +856,19 @@ static int sdma_v2_4_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
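Only the client-id constant changes in this hunk (AMDGPU_IH_CLIENTID_LEGACY becomes AMDGPU_IRQ_CLIENTID_LEGACY); the three interrupt sources themselves are registered exactly as before. For context, each amdgpu_irq_src passed to amdgpu_irq_add_id() is expected to carry a small funcs table along these lines; the function names below follow the file's usual naming pattern and are assumptions, since the table is not shown in this diff:

	static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
		.set = sdma_v2_4_set_trap_irq_state,	/* enable/disable the source in hardware */
		.process = sdma_v2_4_process_trap_irq,	/* handle a delivered IV ring entry */
	};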
@@ -1296,10 +1254,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
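The hunk above now installs the SDMA copy/fill backend unconditionally instead of only when no other IP block had claimed it. What gets installed is a descriptor of the engine's copy and fill capabilities; a sketch of its shape, with field names recalled from struct amdgpu_buffer_funcs of this era and comments that are assumptions, not part of this diff:

	struct amdgpu_buffer_funcs {
		u32	 copy_max_bytes;	/* largest single copy the packet format allows */
		unsigned copy_num_dw;		/* ring dwords one copy packet occupies */
		void	 (*emit_copy_buffer)(struct amdgpu_ib *ib,
					     uint64_t src_offset, uint64_t dst_offset,
					     uint32_t byte_count);
		u32	 fill_max_bytes;	/* largest single fill */
		unsigned fill_num_dw;		/* ring dwords one fill packet occupies */
		void	 (*emit_fill_buffer)(struct amdgpu_ib *ib,
					     uint32_t src_data, uint64_t dst_offset,
					     uint32_t byte_count);
	};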
@@ -1312,16 +1268,16 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
+ struct drm_gpu_scheduler *sched;
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++)
- adev->vm_manager.vm_pte_rings[i] =
- &adev->sdma.instance[i].ring;
-
- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
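The final hunk switches the page-table-update plumbing from publishing raw rings to publishing the kernel-priority run queue of each SDMA ring's GPU scheduler. A consumer then binds a scheduler entity to those run queues instead of picking a ring directly; a minimal usage sketch, assuming the drm_sched_entity_init(entity, rq_list, num_rq_list, guilty) signature of this kernel series and an illustrative vm->entity field:

	/* Bind a VM's page-table-update entity to the run queues published
	 * in sdma_v2_4_set_vm_pte_funcs(); "vm->entity" is illustrative,
	 * not something this diff adds. */
	r = drm_sched_entity_init(&vm->entity,
				  adev->vm_manager.vm_pte_rqs,
				  adev->vm_manager.vm_pte_num_rqs,
				  NULL);
	if (r)
		return r;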