drm/i915: Update move_to_gpu() to take a request structure
Author:    John Harrison <John.C.Harrison@Intel.com>
           Fri, 29 May 2015 16:43:32 +0000 (17:43 +0100)
Committer: Daniel Vetter <daniel.vetter@ffwll.ch>
           Tue, 23 Jun 2015 12:02:03 +0000 (14:02 +0200)
The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_lrc.c
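
[Editor's note] For context on the pattern the diffs below apply: instead of threading an engine pointer (and, for execlists, a context) through each submission helper, the per-submission state is bundled into one request structure and the old parameters are derived at the point of use (req->ring, req->ctx). A minimal standalone sketch of that idea, using simplified stand-in types rather than the real i915 definitions:

#include <stdio.h>

/* Illustrative stand-in types only; the real drm_i915_gem_request,
 * intel_engine_cs and intel_context are far larger. */
struct engine  { const char *name; };
struct context { int id; };

/* One per-submission tracking structure bundling what the code paths need. */
struct request {
	struct engine  *ring;  /* engine the request will execute on */
	struct context *ctx;   /* hardware context it runs in */
};

/* Before: every helper takes the engine and context separately. */
static int move_to_gpu_old(struct engine *ring, struct context *ctx)
{
	printf("flush for %s / ctx %d\n", ring->name, ctx->id);
	return 0;
}

/* After: one request handle; the old parameters are derived at the
 * point of use, exactly as the hunks below do with req->ring. */
static int move_to_gpu_new(struct request *req)
{
	printf("flush for %s / ctx %d\n", req->ring->name, req->ctx->id);
	return 0;
}

int main(void)
{
	struct engine  rcs = { "rcs" };
	struct context ctx = { 1 };
	struct request req = { &rcs, &ctx };

	move_to_gpu_old(&rcs, &ctx);   /* old calling convention */
	return move_to_gpu_new(&req);  /* new calling convention */
}

Collapsing the parameter lists this way means later patches can attach more per-submission state to the request without touching every call site again.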

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 610c330..f6ce811 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -891,10 +891,10 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(ring);
+       const unsigned other_rings = ~intel_ring_flag(req->ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, ring);
+                       ret = i915_gem_object_sync(obj, req->ring);
                        if (ret)
                                return ret;
                }
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
        }
 
        if (flush_chipset)
-               i915_gem_chipset_flush(ring->dev);
+               i915_gem_chipset_flush(req->ring->dev);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return intel_ring_invalidate_all_caches(ring);
+       return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                }
        }
 
-       ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+       ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
        if (ret)
                goto error;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d94c015..aa12a59 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -624,12 +624,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
        return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-                                struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       const unsigned other_rings = ~intel_ring_flag(ring);
+       const unsigned other_rings = ~intel_ring_flag(req->ring);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -639,7 +637,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, ring);
+                       ret = i915_gem_object_sync(obj, req->ring);
                        if (ret)
                                return ret;
                }
@@ -656,7 +654,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return logical_ring_invalidate_all_caches(ringbuf, ctx);
+       return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -918,7 +916,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                return -EINVAL;
        }
 
-       ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+       ret = execlists_move_to_gpu(params->request, vmas);
        if (ret)
                return ret;