author     Jiri Kosina <jkosina@suse.cz>   2020-09-01 14:19:48 +0200
committer  Jiri Kosina <jkosina@suse.cz>   2020-09-01 14:19:48 +0200
commit     ead5d1f4d877e92c051e1a1ade623d0d30e71619 (patch)
tree       cb9db5698a546e7b96f7d5bef5ce544629dd37a2 /drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
parent     scif: Fix spelling of EACCES (diff)
parent     Merge tag 'docs-5.9-3' of git://git.lwn.net/linux (diff)
Merge branch 'master' into for-next
Sync with Linus' branch in order to be able to apply fixups of more recent patches.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be880a236..937029ad5271 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -33,10 +33,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
struct amdgpu_task_info ti;
+ struct amdgpu_device *adev = ring->adev;
memset(&ti, 0, sizeof(struct amdgpu_task_info));
- if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
return;
@@ -49,10 +51,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
ti.process_name, ti.tgid, ti.task_name, ti.pid);
- if (amdgpu_device_should_recover_gpu(ring->adev))
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- else
+ } else {
drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
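Read together, the two hunks above give amdgpu_job_timedout() the following overall shape. This is a readability reconstruction of the '+' side only, with unchanged context between the hunks elided; it is not part of the commit itself.

/*
 * Reconstruction of the post-patch timeout handler, assembled from the
 * hunks shown above.  Context lines not visible in this diff are elided.
 */
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	/* Soft recovery is now additionally gated on the amdgpu_gpu_recovery
	 * module parameter. */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	/* ... unchanged context: task-info lookup and timeout logging ... */
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		amdgpu_device_gpu_recover(ring->adev, job);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		/* On SR-IOV virtual functions, record the TDR for debugging. */
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}
}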
@@ -87,7 +92,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+ enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
@@ -95,7 +101,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -117,12 +123,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
drm_sched_job_cleanup(s_job);
- amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -142,8 +146,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
- enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
int r;
if (!f)
@@ -155,12 +157,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
return 0;
}
@@ -186,16 +184,13 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence;
- bool explicit = false;
int r;
- fence = amdgpu_sync_get_fence(&job->sync, &explicit);
- if (fence && explicit) {
- if (drm_sched_dependency_optimized(fence, s_entity)) {
- r = amdgpu_sync_fence(&job->sched_sync, fence, false);
- if (r)
- DRM_ERROR("Error adding fence (%d)\n", r);
- }
+ fence = amdgpu_sync_get_fence(&job->sync);
+ if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(&job->sched_sync, fence);
+ if (r)
+ DRM_ERROR("Error adding fence (%d)\n", r);
}
while (fence == NULL && vm && !job->vmid) {
@@ -205,7 +200,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
- fence = amdgpu_sync_get_fence(&job->sync, NULL);
+ fence = amdgpu_sync_get_fence(&job->sync);
}
return fence;
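As a usage note on the amdgpu_job_alloc_with_ib() change above, a caller now has to name the IB pool the job's IB should come from. The sketch below is hypothetical and not part of this commit; the pool value AMDGPU_IB_POOL_DELAYED and the owner constant AMDGPU_FENCE_OWNER_UNDEFINED are assumptions based on the amdgpu driver of this period.

/*
 * Hypothetical caller (not part of this commit): allocate a job with a
 * single IB drawn from the delayed IB pool, fill it, and push it to a
 * scheduler entity.  AMDGPU_IB_POOL_DELAYED and AMDGPU_FENCE_OWNER_UNDEFINED
 * are assumed to exist in amdgpu.h of this era.
 */
#include "amdgpu.h"

static int example_submit_job(struct amdgpu_device *adev,
			      struct drm_sched_entity *entity)
{
	struct amdgpu_job *job;
	struct dma_fence *fence;
	int r;

	/* 64 bytes of IB space, taken from the (assumed) delayed pool. */
	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	/* ... write command packets into job->ibs[0].ptr here ... */

	r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	/* The scheduler now owns the job; drop our reference to its fence. */
	dma_fence_put(fence);
	return 0;
}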