drm/i915/gt: Flush submission tasklet before waiting/retiring
author    Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 8 Oct 2019 10:56:55 +0000 (11:56 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 8 Oct 2019 15:23:55 +0000 (16:23 +0100)
A common bane of ours is arbitrary delays in ksoftirqd processing our
submission tasklet. Give the submission tasklet a kick before we wait to
avoid those delays eating into a tight timeout.
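
The patch adds intel_engine_flush_submission(), which runs the engine's
pending execlists tasklet inline (under tasklet_trylock, so it cannot
race with a GPU reset that has disabled the tasklet), and calls it from
both the idle check and the retirement path. As a sketch of the intended
call pattern only (the wait helper and its arguments below are
hypothetical stand-ins, not part of this patch):

	/*
	 * Before starting a tight, timed wait, run any pending
	 * submission tasklet inline rather than waiting for
	 * ksoftirqd, so its scheduling latency is not charged
	 * against our timeout.
	 */
	intel_engine_flush_submission(engine);
	err = wait_for_engine_idle(engine, timeout); /* hypothetical helper */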

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Stuart Summers <stuart.summers@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191008105655.13256-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index c9e8c8c..d624752 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -407,8 +407,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
        engine->serial++; /* contexts lost */
 }
 
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 
 void intel_engines_reset_default_submission(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 6220b71..7e2aa7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1040,6 +1040,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
        return idle;
 }
 
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+       struct tasklet_struct *t = &engine->execlists.tasklet;
+
+       if (__tasklet_is_scheduled(t)) {
+               local_bh_disable();
+               if (tasklet_trylock(t)) {
+                       /* Must wait for any GPU reset in progress. */
+                       if (__tasklet_is_enabled(t))
+                               t->func(t->data);
+                       tasklet_unlock(t);
+               }
+               local_bh_enable();
+       }
+
+       /* Otherwise flush the tasklet if it was running on another cpu */
+       tasklet_unlock_wait(t);
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished processing all work
  * @engine: the intel_engine_cs
@@ -1058,21 +1077,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 
        /* Waiting to drain ELSP? */
        if (execlists_active(&engine->execlists)) {
-               struct tasklet_struct *t = &engine->execlists.tasklet;
-
                synchronize_hardirq(engine->i915->drm.pdev->irq);
 
-               local_bh_disable();
-               if (tasklet_trylock(t)) {
-                       /* Must wait for any GPU reset in progress. */
-                       if (__tasklet_is_enabled(t))
-                               t->func(t->data);
-                       tasklet_unlock(t);
-               }
-               local_bh_enable();
-
-               /* Otherwise flush the tasklet if it was on another cpu */
-               tasklet_unlock_wait(t);
+               intel_engine_flush_submission(engine);
 
                if (execlists_active(&engine->execlists))
                        return false;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index ca606b7..cbb4069 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -4,6 +4,7 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -19,6 +20,15 @@ static void retire_requests(struct intel_timeline *tl)
                        break;
 }
 
+static void flush_submission(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, gt->i915, id)
+               intel_engine_flush_submission(engine);
+}
+
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
        struct intel_gt_timelines *timelines = &gt->timelines;
@@ -32,6 +42,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;
 
+       flush_submission(gt); /* kick the ksoftirqd tasklets */
+
        spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex)) {