drm/i915/gt: Repeat wait_for_idle for retirement workers
author    Chris Wilson <chris@chris-wilson.co.uk>    Sat, 21 Dec 2019 18:02:04 +0000 (18:02 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>    Sat, 21 Dec 2019 18:56:24 +0000 (18:56 +0000)
Since we may retire timelines from secondary workers,
intel_gt_retire_requests() is not always a reliable indicator that all
pending retirements are complete. If we do detect that secondary workers
are still in progress, have intel_gt_wait_for_idle() repeat the
retirement check.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191221180204.1201217-1-chris@chris-wilson.co.uk
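
For context on how the repeat is meant to happen: intel_gt_retire_requests_timeout() (in the last diff below) returns the unconsumed timeout while anything still looks busy, and 0 once idle, so the caller simply loops. A minimal sketch of such a wait loop, assuming cond_resched() and signal_pending() from <linux/sched.h> / <linux/sched/signal.h>; the in-tree intel_gt_wait_for_idle() may differ in detail:

	static int wait_for_idle(struct intel_gt *gt, long timeout)
	{
		/* Keep retiring until no timeline or worker reports itself busy. */
		while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
			cond_resched();
			if (signal_pending(current))
				return -EINTR;
		}

		return timeout; /* 0 once idle */
	}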
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b21c20e..d4fd56f 100644
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+bool intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0c4c070..5309c61 100644
@@ -1079,9 +1079,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
        return idle;
 }
 
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
        struct tasklet_struct *t = &engine->execlists.tasklet;
+       bool active = tasklet_is_locked(t);
 
        if (__tasklet_is_scheduled(t)) {
                local_bh_disable();
@@ -1092,10 +1093,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
                        tasklet_unlock(t);
                }
                local_bh_enable();
+               active = true;
        }
 
        /* Otherwise flush the tasklet if it was running on another cpu */
        tasklet_unlock_wait(t);
+
+       return active;
 }
 
 /**
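
A note on the helpers above (a reading aid, not part of the patch): __tasklet_is_scheduled() is i915's shorthand for testing TASKLET_STATE_SCHED, tasklet_is_locked() observes whether the tasklet is executing on another CPU at that moment, and tasklet_unlock_wait() spins until any such run completes. The function therefore returns true if it either ran the pending callback itself or caught it mid-run elsewhere. A hypothetical caller could treat that as a "work was in flight, check again" signal:

	/* Hypothetical illustration: drain until no submission work is in flight. */
	static void drain_submission(struct intel_engine_cs *engine)
	{
		while (intel_engine_flush_submission(engine))
			cond_resched();
	}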
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 063f863..b4f0461 100644
@@ -23,15 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
                        break;
 }
 
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       bool active = false;
 
        for_each_engine(engine, gt, id) {
-               intel_engine_flush_submission(engine);
-               flush_work(&engine->retire_work);
+               active |= intel_engine_flush_submission(engine);
+               active |= flush_work(&engine->retire_work);
        }
+
+       return active;
 }
 
 static void engine_retire(struct work_struct *work)
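
Worth noting for the hunk above: flush_work() from <linux/workqueue.h> already returns a bool, true if it had to wait for the work item to finish executing and false if the work was already idle, so both halves of the accumulation carry the same "something was still running" meaning. As a hypothetical single-engine helper:

	/* Hypothetical helper: was a retirement worker still running on this engine? */
	static bool retire_worker_was_active(struct intel_engine_cs *engine)
	{
		/* flush_work() returns true iff it waited for the work to finish */
		return flush_work(&engine->retire_work);
	}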
@@ -120,9 +123,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-               active_count++; /* report busy to caller, try again? */
-               if (!mutex_trylock(&tl->mutex))
+               if (!mutex_trylock(&tl->mutex)) {
+                       active_count++; /* report busy to caller, try again? */
                        continue;
+               }
 
                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
@@ -147,10 +151,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
-               if (atomic_dec_and_test(&tl->active_count)) {
+               if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);
-                       active_count--;
-               }
+               else
+                       active_count += i915_active_fence_isset(&tl->last_request);
 
                mutex_unlock(&tl->mutex);
 
@@ -165,7 +169,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);
 
-       flush_submission(gt);
+       if (flush_submission(gt))
+               active_count++;
 
        return active_count ? timeout : 0;
 }
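
Net effect of the accounting change: active_count now counts only timelines that are demonstrably still busy (those whose mutex could not be taken, and those whose last_request fence is still set), plus one if flush_submission() caught a worker in flight, rather than every timeline visited. With a zero timeout the function degenerates into a single opportunistic retirement pass whose return value is always 0, which is why a convenience wrapper can ignore it. A sketch of such a wrapper, assuming the layout of intel_gt_requests.h:

	static inline void intel_gt_retire_requests(struct intel_gt *gt)
	{
		/* One non-blocking retirement pass; idleness is rechecked by callers. */
		intel_gt_retire_requests_timeout(gt, 0);
	}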