author     jsg <jsg@openbsd.org>  2020-06-08 04:47:57 +0000
committer  jsg <jsg@openbsd.org>  2020-06-08 04:47:57 +0000
commit     c349dbc7938c71a30e13c1be4acc1976165f4630 (patch)
tree       8798187dfd7a927a15123e8dad31b782b074baa8  /sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c
parent     The errcheck() function treats an errno of ERANGE or EDOM as something (diff)
update drm to linux 5.7
adds kernel support for

amdgpu: vega20, raven2, renoir, navi10, navi14
inteldrm: icelake, tigerlake

Thanks to the OpenBSD Foundation for sponsoring this work, kettenis@ for
helping, patrick@ for helping adapt rockchip drm, and many developers for
testing.
Diffstat (limited to 'sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c')
-rw-r--r--  sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c  108
1 file changed, 46 insertions(+), 62 deletions(-)
diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c
index d61e18decab..747ac1ddfa0 100644
--- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c
+++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_cik_sdma.c
@@ -21,8 +21,10 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -198,7 +200,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -218,13 +220,15 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ uint32_t flags)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
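Note on the padding change above: both the old and the new expression pad the ring so that the write pointer lands on 4 (mod 8); the INDIRECT_BUFFER packet emitted next is 4 DWs, so it ends on the 8-DW boundary the comment requires. (AMDGPU_JOB_GET_VMID yields the job's vmid, or 0 when job is NULL.) A standalone sanity check of the equivalence, a sketch rather than driver code:

/* Standalone check (sketch, not driver code): the old and new nop-pad
 * formulas in cik_sdma_ring_emit_ib() agree for every wptr value. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t w;

	for (w = 0; w < 8; w++)	/* only wptr & 7 matters */
		assert((12 - (w & 7)) % 8 == ((4 - w) & 7));
	return 0;
}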
@@ -316,8 +320,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -494,18 +498,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
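For reference, amdgpu_ring_test_helper() folds the removed ready-flag bookkeeping into one place: it runs the ring test and records the outcome in ring->sched.ready, which is why the explicit "ring->ready = false" on failure can go. Its Linux 5.7 implementation is roughly the following (a sketch of amdgpu_ring.c for context, not part of this diff):

int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	/* the scheduler only submits jobs to rings marked ready */
	ring->sched.ready = !r;
	return r;
}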
@@ -618,21 +620,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -644,18 +642,14 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -678,20 +672,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +696,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -822,11 +807,11 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
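As with the emit_ib change, the new pad_count expression is behavior-preserving: both forms compute the number of NOP DWs needed to round length_dw up to the next multiple of 8. A standalone check, again a sketch rather than driver code:

/* Standalone check (sketch, not driver code): old vs new pad_count
 * in cik_sdma_ring_pad_ib(). */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t len;

	for (len = 0; len < 64; len++)
		assert((8 - (len & 0x7)) % 8 == ((-len) & 7));
	return 0;
}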
@@ -970,19 +955,19 @@ static int cik_sdma_sw_init(void *handle)
}
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -994,8 +979,8 @@ static int cik_sdma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1131,7 +1116,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1147,7 +1132,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
@@ -1214,8 +1199,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
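For context, drm_sched_fault() replaces the driver-global reset_work with per-ring recovery: it kicks the GPU scheduler's timeout handler for just the faulting SDMA instance, and that handler then drives job recovery. In Linux 5.7 it is roughly (a sketch from scheduler/sched_main.c, not part of this diff):

void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	/* fire the scheduler's timeout work immediately; it runs the
	 * driver's timed-out-job handling / reset path for this ring */
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}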
@@ -1370,10 +1358,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
@@ -1388,14 +1374,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++)
- adev->vm_manager.vm_pte_rings[i] =
- &adev->sdma.instance[i].ring;
-
- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =