OSDN Git Service

drm/sched: add drm_sched_start_timeout helper
author: Christian König <christian.koenig@amd.com>
Fri, 5 Oct 2018 17:56:39 +0000 (19:56 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 12 Oct 2018 17:52:21 +0000 (12:52 -0500)
Cleanup starting the timeout a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/scheduler/sched_main.c

index 4e8505d..bd7d11c 100644 (file)
@@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 }
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Queue the TDR work iff a timeout is configured and jobs are pending.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+           !list_empty(&sched->ring_mirror_list))
+               schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -203,9 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
        /* remove job from ring_mirror_list */
        list_del(&s_job->node);
        /* queue TDR for next job */
-       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-           !list_empty(&sched->ring_mirror_list))
-               schedule_delayed_work(&sched->work_tdr, sched->timeout);
+       drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
 
        dma_fence_put(&s_job->s_fence->finished);
@@ -229,10 +241,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
-       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-           list_first_entry_or_null(&sched->ring_mirror_list,
-                                    struct drm_sched_job, node) == s_job)
-               schedule_delayed_work(&sched->work_tdr, sched->timeout);
+       drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
 }
 
@@ -313,11 +322,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
        int r;
 
        spin_lock(&sched->job_list_lock);
-       s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-                                        struct drm_sched_job, node);
-       if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-               schedule_delayed_work(&sched->work_tdr, sched->timeout);
-
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;
@@ -350,6 +354,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
                }
                spin_lock(&sched->job_list_lock);
        }
+       drm_sched_start_timeout(sched);
        spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);