OSDN Git Service

drm/i915: Nonblocking request submission
Author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Sep 2016 13:12:00 +0000 (14:12 +0100)
Committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Sep 2016 13:23:08 +0000 (14:23 +0100)
Now that we have fences in place to drive request submission, we can
employ those to queue requests after their dependencies as opposed to
stalling in the middle of an execbuf ioctl. (However, we still choose to
spin before enabling the IRQ as that is faster - though contentious.)

v2: Do the fence ordering first, where we can still fail.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-20-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h

index 017cadf..40978bc 100644 (file)
@@ -477,12 +477,13 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 
        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
-               ret = i915_wait_request(from,
-                                       I915_WAIT_INTERRUPTIBLE |
-                                       I915_WAIT_LOCKED,
-                                       NULL, NO_WAITBOOST);
-               if (ret)
-                       return ret;
+               if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+                       ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                           &from->fence, 0,
+                                                           GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
@@ -577,6 +578,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
+       struct drm_i915_gem_request *prev;
        u32 request_start;
        u32 reserved_tail;
        int ret;
@@ -631,6 +633,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */
+
+       prev = i915_gem_active_raw(&engine->last_request,
+                                  &request->i915->drm.struct_mutex);
+       if (prev)
+               i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+                                            &request->submitq);
+
        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
        engine->last_submitted_seqno = request->fence.seqno;
index 883df3b..974bd7b 100644 (file)
@@ -84,6 +84,7 @@ struct drm_i915_gem_request {
        struct intel_signal_node signaling;
 
        struct i915_sw_fence submit;
+       wait_queue_t submitq;
 
        /** GEM sequence number associated with the previous request,
         * when the HWS breadcrumb is equal to this the GPU is processing