author    Dave Airlie <airlied@redhat.com>  2017-05-12 13:58:29 +1000
committer Dave Airlie <airlied@redhat.com>  2017-05-12 13:58:29 +1000
commit    7ec27233e62b5efe795563896577de5340dc7473 (patch)
tree      7cf654cf27518d5e0b1a7718ad454fa3a5d45af8 /drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
parent    Merge tag 'drm-misc-next-fixes-2017-05-05' of git://anongit.freedesktop.org/git/drm-misc into drm-next (diff)
parent    drm/amd/powerplay: refine pwm1_enable callback functions for CI. (diff)
Merge branch 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-next
Fixes for 4.12. This is a bit bigger than usual since it's 3 weeks' worth of fixes, and most of these changes are for vega10, which is new for 4.12 and still in a fair amount of flux. It looks like you missed my last pull request, so those patches are included here as well.

Highlights:
- Lots of vega10 fixes
- Fix interruptible wait mixup
- Fan control method fixes
- Misc display fixes for radeon and amdgpu
- Misc bug fixes

* 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux: (132 commits)
  drm/amd/powerplay: refine pwm1_enable callback functions for CI.
  drm/amd/powerplay: refine pwm1_enable callback functions for vi.
  drm/amd/powerplay: refine pwm1_enable callback functions for Vega10.
  drm/amdgpu: refine amdgpu pwm1_enable sysfs interface.
  drm/amdgpu: add amd fan ctrl mode enums.
  drm/amd/powerplay: add more smu message on Vega10.
  drm/amdgpu: fix dependency issue
  drm/amd: fix init order of sched job
  drm/amdgpu: add some additional vega10 pci ids
  drm/amdgpu/soc15: use atomfirmware for setting bios scratch for reset
  drm/amdgpu/atomfirmware: add function to update engine hang status
  drm/radeon: only warn once in radeon_ttm_bo_destroy if va list not empty
  drm/amdgpu: fix mutex list null pointer reference
  drm/amd/powerplay: fix bug sclk/mclk level can't be set on vega10.
  drm/amd/powerplay: Setup sw CTF to allow graceful exit when temperature exceeds maximum.
  drm/amd/powerplay: delete dead code in powerplay.
  drm/amdgpu: Use less generic enum definitions
  drm/amdgpu/gfx9: derive tile pipes from golden settings
  drm/amdgpu/gfx: drop max_gs_waves_per_vgt
  drm/amd/powerplay: disable engine spread spectrum feature on Vega10.
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index acd882a188bc..fea96a765cf1 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -236,6 +236,23 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
 	dma_fence_put(f);
 }
 
+bool amd_sched_dependency_optimized(struct dma_fence* fence,
+				    struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct amd_sched_fence *s_fence;
+
+	if (!fence || dma_fence_is_signaled(fence))
+		return false;
+	if (fence->context == entity->fence_context)
+		return true;
+	s_fence = to_amd_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched)
+		return true;
+
+	return false;
+}
+
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
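
The helper added above answers a single question for the submission path: can this dependency be left to the scheduler's own ordering instead of an explicit hardware sync? It returns true only when the fence comes from the same fence context as the entity, or is a scheduler fence belonging to the same scheduler; a NULL or already-signaled fence returns false, since there is nothing to wait on at all. A minimal caller sketch, assuming a hypothetical driver hook my_ring_emit_sync() that is not part of this patch:

	#include <linux/dma-fence.h>
	#include "gpu_scheduler.h"

	/* Hypothetical driver hook that emits a hardware wait; assumed. */
	extern void my_ring_emit_sync(struct dma_fence *dep);

	static void my_wait_for_dep(struct amd_sched_entity *entity,
				    struct dma_fence *dep)
	{
		if (!dep || dma_fence_is_signaled(dep))
			return;	/* nothing left to wait on */
		if (amd_sched_dependency_optimized(dep, entity))
			return;	/* scheduler ordering already covers it */
		my_ring_emit_sync(dep);	/* full hardware sync still required */
	}
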
@@ -387,7 +404,9 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
-		if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+		if (s_job->s_fence->parent &&
+		    dma_fence_remove_callback(s_job->s_fence->parent,
+					      &s_job->s_fence->cb)) {
 			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 		}
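
The guard added in this hunk matters because s_job->s_fence->parent can already be NULL, for example when a job never had its hardware fence installed or it was detached by an earlier reset; the old unconditional call would have dereferenced a NULL pointer. Note that dma_fence_remove_callback() returns true only if the callback had not yet run, so the reference is dropped exactly once. The same idiom as a standalone sketch, with detach_parent as a hypothetical name that does not appear in this patch:

	#include <linux/dma-fence.h>
	#include "gpu_scheduler.h"

	static void detach_parent(struct amd_sched_fence *s_fence)
	{
		/* Skip fences that never had a hardware parent attached. */
		if (s_fence->parent &&
		    dma_fence_remove_callback(s_fence->parent, &s_fence->cb)) {
			dma_fence_put(s_fence->parent);	/* drop the callback's ref */
			s_fence->parent = NULL;		/* mark as detached */
		}
	}
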
@@ -460,9 +479,9 @@ int amd_sched_job_init(struct amd_sched_job *job,
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
-	job->id = atomic64_inc_return(&sched->job_id_count);
 	if (!job->s_fence)
 		return -ENOMEM;
+	job->id = atomic64_inc_return(&sched->job_id_count);
 	INIT_WORK(&job->finish_work, amd_sched_job_finish);
 	INIT_LIST_HEAD(&job->node);
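
The reordering above is an error-path fix (the "drm/amd: fix init order of sched job" commit from the merge message): amd_sched_fence_create() can fail, and the old code consumed an id from sched->job_id_count before checking for that failure, so every failed init leaked a job id. After the hunk the function plausibly reads as below; the lines past the visible context, including the final return, are assumed from the usual shape of an init function and are not shown in this diff:

	int amd_sched_job_init(struct amd_sched_job *job,
			       struct amd_gpu_scheduler *sched,
			       struct amd_sched_entity *entity,
			       void *owner)
	{
		job->sched = sched;
		job->s_entity = entity;
		job->s_fence = amd_sched_fence_create(entity, owner);
		if (!job->s_fence)
			return -ENOMEM;	/* fail before consuming a job id */
		job->id = atomic64_inc_return(&sched->job_id_count);

		INIT_WORK(&job->finish_work, amd_sched_job_finish);
		INIT_LIST_HEAD(&job->node);
		/* ... remaining init assumed; returns 0 on success ... */
	}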