OSDN Git Service

drm/scheduler: provide scheduler score externally
author: Christian König <christian.koenig@amd.com>
Tue, 2 Feb 2021 11:40:01 +0000 (12:40 +0100)
committer: Christian König <christian.koenig@amd.com>
Fri, 5 Feb 2021 09:47:11 +0000 (10:47 +0100)
Allow multiple schedulers to share the load balancing score.

This is useful when one engine has different hw rings.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-and-Tested-by: Leo Liu <leo.liu@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210204144405.2737-1-christian.koenig@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/lima/lima_sched.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/v3d/v3d_sched.c
include/drm/gpu_scheduler.h

index d56f402..8e0a565 100644 (file)
@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   num_hw_submission, amdgpu_job_hang_limit,
-                                  timeout, ring->name);
+                                  timeout, NULL, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
index 2a9439c..19826e5 100644 (file)
@@ -190,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 
        ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
                             etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
-                            msecs_to_jiffies(500), dev_name(gpu->dev));
+                            msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
        if (ret)
                return ret;
 
index 20dafa6..ecf3267 100644 (file)
@@ -509,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 
        return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
                              lima_job_hang_limit, msecs_to_jiffies(timeout),
-                             name);
+                             NULL, name);
 }
 
 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
index 0a83eef..6003cfe 100644 (file)
@@ -627,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
-                                    "pan_js");
+                                    NULL, "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
index c1ac3e4..92d965b 100644 (file)
@@ -489,7 +489,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
        bool first;
 
        trace_drm_sched_job(sched_job, entity);
-       atomic_inc(&entity->rq->sched->score);
+       atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
index 73fccc5..d82a7eb 100644 (file)
@@ -91,7 +91,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
-       atomic_inc(&rq->sched->score);
+       atomic_inc(rq->sched->score);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
 }
@@ -110,7 +110,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
-       atomic_dec(&rq->sched->score);
+       atomic_dec(rq->sched->score);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
@@ -173,7 +173,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
        struct drm_gpu_scheduler *sched = s_fence->sched;
 
        atomic_dec(&sched->hw_rq_count);
-       atomic_dec(&sched->score);
+       atomic_dec(sched->score);
 
        trace_drm_sched_process_job(s_fence);
 
@@ -732,7 +732,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
                        continue;
                }
 
-               num_score = atomic_read(&sched->score);
+               num_score = atomic_read(sched->score);
                if (num_score < min_score) {
                        min_score = num_score;
                        picked_sched = sched;
@@ -842,16 +842,15 @@ static int drm_sched_main(void *param)
  * @hw_submission: number of hw submissions that can be in flight
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
+ * @score: optional score atomic shared with other schedulers
  * @name: name used for debugging
  *
  * Return 0 on success, otherwise error code.
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
-                  unsigned hw_submission,
-                  unsigned hang_limit,
-                  long timeout,
-                  const char *name)
+                  unsigned hw_submission, unsigned hang_limit, long timeout,
+                  atomic_t *score, const char *name)
 {
        int i, ret;
        sched->ops = ops;
@@ -859,6 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
        sched->name = name;
        sched->timeout = timeout;
        sched->hang_limit = hang_limit;
+       sched->score = score ? score : &sched->_score;
        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
                drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
@@ -868,7 +868,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-       atomic_set(&sched->score, 0);
+       atomic_set(&sched->_score, 0);
        atomic64_set(&sched->job_id_count, 0);
 
        /* Each scheduler will run on a seperate kernel thread */
index ceb33f8..8992480 100644 (file)
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             &v3d_bin_sched_ops,
                             hw_jobs_limit, job_hang_limit,
                             msecs_to_jiffies(hang_limit_ms),
-                            "v3d_bin");
+                            NULL, "v3d_bin");
        if (ret) {
                dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
                return ret;
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             &v3d_render_sched_ops,
                             hw_jobs_limit, job_hang_limit,
                             msecs_to_jiffies(hang_limit_ms),
-                            "v3d_render");
+                            NULL, "v3d_render");
        if (ret) {
                dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
                        ret);
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                             &v3d_tfu_sched_ops,
                             hw_jobs_limit, job_hang_limit,
                             msecs_to_jiffies(hang_limit_ms),
-                            "v3d_tfu");
+                            NULL, "v3d_tfu");
        if (ret) {
                dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
                        ret);
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     &v3d_csd_sched_ops,
                                     hw_jobs_limit, job_hang_limit,
                                     msecs_to_jiffies(hang_limit_ms),
-                                    "v3d_csd");
+                                    NULL, "v3d_csd");
                if (ret) {
                        dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
                                ret);
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
                                     &v3d_cache_clean_sched_ops,
                                     hw_jobs_limit, job_hang_limit,
                                     msecs_to_jiffies(hang_limit_ms),
-                                    "v3d_cache_clean");
+                                    NULL, "v3d_cache_clean");
                if (ret) {
                        dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
                                ret);
index ce6a383..1c815e0 100644 (file)
@@ -297,7 +297,8 @@ struct drm_gpu_scheduler {
        struct list_head                pending_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
-       atomic_t                        score;
+       atomic_t                        *score;
+       atomic_t                        _score;
        bool                            ready;
        bool                            free_guilty;
 };
@@ -305,7 +306,7 @@ struct drm_gpu_scheduler {
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
-                  const char *name);
+                  atomic_t *score, const char *name);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,