OSDN Git Service

drm/i915/gt: Schedule request retirement when signaler idles
authorChris Wilson <chris@chris-wilson.co.uk>
Thu, 19 Dec 2019 12:43:52 +0000 (12:43 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 19 Dec 2019 17:03:56 +0000 (17:03 +0000)
Very similar to commit 4f88f8747fa4 ("drm/i915/gt: Schedule request
retirement when timeline idles"), but this time instead of coupling into
the execlists CS event interrupt, we couple into the breadcrumb
interrupt and queue a timeline's retirement when the last signaler is
completed. This should allow us to more rapidly park ringbuffer
submission, and so help reduce power consumption on older systems.

v2: Fixup intel_engine_add_retire() to handle concurrent callers

References: 4f88f8747fa4 ("drm/i915/gt: Schedule request retirement when timeline idles")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191219124353.8607-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_lrc.c

index 5fa4d62..4f49179 100644 (file)
@@ -29,6 +29,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 
 static void irq_enable(struct intel_engine_cs *engine)
 {
@@ -179,8 +180,11 @@ static void signal_irq_work(struct irq_work *work)
                if (!list_is_first(pos, &ce->signals)) {
                        /* Advance the list to the first incomplete request */
                        __list_del_many(&ce->signals, pos);
-                       if (&ce->signals == pos) /* now empty */
+                       if (&ce->signals == pos) { /* now empty */
                                list_del_init(&ce->signal_link);
+                               intel_engine_add_retire(ce->engine,
+                                                       ce->timeline);
+                       }
                }
        }
 
index 8cb5421..0d1bca7 100644 (file)
@@ -62,19 +62,16 @@ static void engine_retire(struct work_struct *work)
 static bool add_retire(struct intel_engine_cs *engine,
                       struct intel_timeline *tl)
 {
+#define STUB ((struct intel_timeline *)1)
        struct intel_timeline *first;
 
        /*
         * We open-code a llist here to include the additional tag [BIT(0)]
         * so that we know when the timeline is already on a
         * retirement queue: either this engine or another.
-        *
-        * However, we rely on that a timeline can only be active on a single
-        * engine at any one time and that add_retire() is called before the
-        * engine releases the timeline and transferred to another to retire.
         */
 
-       if (READ_ONCE(tl->retire)) /* already queued */
+       if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
                return false;
 
        intel_timeline_get(tl);
index 4db54fd..56cf71d 100644 (file)
@@ -4511,8 +4511,8 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 
        intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
        intel_engine_init_breadcrumbs(&ve->base);
-
        intel_engine_init_execlists(&ve->base);
+       intel_engine_init_retire(&ve->base);
 
        ve->base.cops = &virtual_context_ops;
        ve->base.request_alloc = execlists_request_alloc;