Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 46 ++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 8af67f649660..1c50be3ab8a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -52,6 +52,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
ih->use_bus_addr = use_bus_addr;
if (use_bus_addr) {
+ dma_addr_t dma_addr;
+
if (ih->ring)
return 0;
@@ -59,21 +61,26 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
* add them to the end of the ring allocation.
*/
ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
- &ih->rb_dma_addr, GFP_KERNEL);
+ &dma_addr, GFP_KERNEL);
if (ih->ring == NULL)
return -ENOMEM;
memset((void *)ih->ring, 0, ih->ring_size + 8);
- ih->wptr_offs = (ih->ring_size / 4) + 0;
- ih->rptr_offs = (ih->ring_size / 4) + 1;
+ ih->gpu_addr = dma_addr;
+ ih->wptr_addr = dma_addr + ih->ring_size;
+ ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
+ ih->rptr_addr = dma_addr + ih->ring_size + 4;
+ ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
} else {
- r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+ unsigned wptr_offs, rptr_offs;
+
+ r = amdgpu_device_wb_get(adev, &wptr_offs);
if (r)
return r;
- r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &rptr_offs);
if (r) {
- amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
@@ -82,10 +89,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
&ih->ring_obj, &ih->gpu_addr,
(void **)&ih->ring);
if (r) {
- amdgpu_device_wb_free(adev, ih->rptr_offs);
- amdgpu_device_wb_free(adev, ih->wptr_offs);
+ amdgpu_device_wb_free(adev, rptr_offs);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
+
+ ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
+ ih->wptr_cpu = &adev->wb.wb[wptr_offs];
+ ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
+ ih->rptr_cpu = &adev->wb.wb[rptr_offs];
}
return 0;
}
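
For reference, a minimal self-contained sketch of the address arithmetic in the use_bus_addr branch above. Plain C types stand in for the amdgpu structures; only the field names and the ring_size + 8 layout come from the diff, everything else is illustrative:

#include <stdint.h>

struct ih_ring_layout {
	uint64_t gpu_addr;            /* bus address of the ring buffer itself */
	uint64_t wptr_addr;           /* GPU-visible address of the wptr shadow */
	uint64_t rptr_addr;           /* GPU-visible address of the rptr shadow */
	volatile uint32_t *wptr_cpu;  /* CPU view of the same two dwords */
	volatile uint32_t *rptr_cpu;
};

/*
 * ring points at a buffer of ring_size + 8 bytes; the two extra dwords at
 * the end hold the wptr and rptr shadows, as in the hunk above.
 */
static void ih_ring_layout_init(struct ih_ring_layout *l, volatile uint32_t *ring,
				uint64_t dma_addr, unsigned int ring_size)
{
	l->gpu_addr  = dma_addr;
	l->wptr_addr = dma_addr + ring_size;       /* first extra dword  */
	l->rptr_addr = dma_addr + ring_size + 4;   /* second extra dword */
	l->wptr_cpu  = &ring[ring_size / 4];
	l->rptr_cpu  = &ring[ring_size / 4 + 1];
}

The write-back branch fills the same four fields from a write-back slot instead: the GPU address is adev->wb.gpu_addr + slot * 4 and the CPU pointer is &adev->wb.wb[slot], so later code no longer needs to care which allocation path was taken.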
@@ -109,13 +121,13 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
* add them to the end of the ring allocation.
*/
dma_free_coherent(adev->dev, ih->ring_size + 8,
- (void *)ih->ring, ih->rb_dma_addr);
+ (void *)ih->ring, ih->gpu_addr);
ih->ring = NULL;
} else {
amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
(void **)&ih->ring);
- amdgpu_device_wb_free(adev, ih->wptr_offs);
- amdgpu_device_wb_free(adev, ih->rptr_offs);
+ amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4);
+ amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4);
}
}
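
The fini path no longer has the old wptr_offs/rptr_offs fields, so the write-back slot index handed back to amdgpu_device_wb_free() is recomputed from the stored addresses. A hedged sketch of that round trip, with a generic base address standing in for whatever the driver subtracts (the names here are illustrative, not kernel API):

#include <assert.h>
#include <stdint.h>

/* forward direction, as in the init hunk: slot index -> GPU address */
static uint64_t wb_slot_to_addr(uint64_t wb_base, unsigned int slot)
{
	return wb_base + (uint64_t)slot * 4;   /* one dword per write-back slot */
}

/* reverse direction, as in the fini hunk: GPU address -> slot index */
static unsigned int wb_addr_to_slot(uint64_t wb_base, uint64_t addr)
{
	return (unsigned int)((addr - wb_base) / 4);
}

int main(void)
{
	uint64_t base = 0x100000;

	/* the round trip is exact as long as the same base is used both ways */
	assert(wb_addr_to_slot(base, wb_slot_to_addr(base, 42)) == 42);
	return 0;
}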
@@ -128,16 +140,14 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
* Interrupt hander (VI), walk the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih))
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
u32 wptr;
if (!ih->enabled || adev->shutdown)
return IRQ_NONE;
- wptr = amdgpu_ih_get_wptr(adev);
+ wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
/* is somebody else already processing irqs? */
@@ -150,15 +160,15 @@ restart_ih:
rmb();
while (ih->rptr != wptr) {
- callback(adev, ih);
+ amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev);
+ amdgpu_ih_set_rptr(adev, ih);
atomic_set(&ih->lock, 0);
/* make sure wptr hasn't changed while processing */
- wptr = amdgpu_ih_get_wptr(adev);
+ wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
goto restart_ih;
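
With the callback parameter gone, amdgpu_ih_process() takes the ring explicitly and always dispatches through amdgpu_irq_dispatch(). Below is a self-contained model of the control flow in this last hunk; the ring state and the three helpers are stand-ins for the amdgpu functions, and only the lock/drain/re-check-wptr/restart structure mirrors the diff:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ih_ring_model {
	atomic_int lock;      /* "is somebody else already processing irqs?" */
	uint32_t rptr;
	uint32_t wptr;        /* written by the "hardware" in this model */
	uint32_t ptr_mask;
	bool enabled;
};

static uint32_t model_get_wptr(struct ih_ring_model *ih)  /* amdgpu_ih_get_wptr() stand-in */
{
	return ih->wptr & ih->ptr_mask;
}

static void model_dispatch(struct ih_ring_model *ih)      /* amdgpu_irq_dispatch() stand-in */
{
	ih->rptr++;   /* a real dispatch decodes one IV entry and advances rptr */
}

static void model_set_rptr(struct ih_ring_model *ih)      /* amdgpu_ih_set_rptr() stand-in */
{
	(void)ih;     /* a real implementation writes rptr back to the hardware */
}

static int model_ih_process(struct ih_ring_model *ih)
{
	uint32_t wptr;

	if (!ih->enabled)
		return 0;                      /* IRQ_NONE in the real code */

	wptr = model_get_wptr(ih);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_exchange(&ih->lock, 1))
		return 0;

	/* the real code issues rmb() here to order the wptr read vs. ring reads */
	while (ih->rptr != wptr) {
		model_dispatch(ih);
		ih->rptr &= ih->ptr_mask;
	}

	model_set_rptr(ih);
	atomic_store(&ih->lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = model_get_wptr(ih);
	if (wptr != ih->rptr)
		goto restart_ih;

	return 1;                              /* IRQ_HANDLED in the real code */
}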