From de220cc21967fd745d91fbd3fc23a13372730db8 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Mon, 8 Apr 2019 10:17:03 +0100
Subject: [PATCH] drm/i915: Consolidate the timeline->barrier

The timeline is strictly ordered, so by inserting the timeline->barrier
request into the timeline->last_request it naturally provides the same
barrier. Consolidate the pair of barriers into one as they serve the
same purpose.

Signed-off-by: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190408091728.20207-4-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c        | 13 ++-----------
 drivers/gpu/drm/i915/i915_request.c            |  9 ---------
 drivers/gpu/drm/i915/i915_timeline.c           |  2 --
 drivers/gpu/drm/i915/i915_timeline.h           | 15 ---------------
 drivers/gpu/drm/i915/i915_timeline_types.h     | 10 ----------
 drivers/gpu/drm/i915/selftests/mock_timeline.c |  1 -
 6 files changed, 2 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 66b6852cb4d2..7fc34ab6df87 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -1167,7 +1167,7 @@ static int
 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
 	struct drm_i915_private *i915 = ce->engine->i915;
-	struct i915_request *rq, *prev;
+	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	int ret;
 
@@ -1192,16 +1192,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	}
 
 	/* Queue this switch after all other activity by this context. */
-	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
-				       &i915->drm.struct_mutex);
-	if (prev && !i915_request_completed(prev)) {
-		ret = i915_request_await_dma_fence(rq, &prev->fence);
-		if (ret < 0)
-			goto out_add;
-	}
-
-	/* Order all following requests to be after. */
-	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+	ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
 	if (ret)
 		goto out_add;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2da0d6436a1a..96a9e8bcd805 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -584,11 +584,6 @@ out:
 	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
-static int add_timeline_barrier(struct i915_request *rq)
-{
-	return i915_request_await_active_request(rq, &rq->timeline->barrier);
-}
-
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -738,10 +733,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	rq->head = rq->ring->emit;
 
-	ret = add_timeline_barrier(rq);
-	if (ret)
-		goto err_unwind;
-
 	ret = engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 2f4907364920..5fbea0892f33 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -253,7 +253,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
@@ -326,7 +325,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
-	GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
 	i915_syncmap_free(&timeline->sync);
 
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 4ca7f80bdf6d..27668a1a69a3 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -110,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
 void i915_timelines_fini(struct drm_i915_private *i915);
 
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
-	return i915_active_request_set(&tl->barrier, rq);
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 1f5b55d9ffb5..5256a0b5c5f7 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -61,16 +61,6 @@ struct i915_timeline {
 	 */
 	struct i915_syncmap *sync;
 
-	/**
-	 * Barrier provides the ability to serialize ordering between different
-	 * timelines.
-	 *
-	 * Users can call i915_timeline_set_barrier which will make all
-	 * subsequent submissions to this timeline be executed only after the
-	 * barrier has been completed.
-	 */
-	struct i915_active_request barrier;
-
 	struct list_head link;
 	struct drm_i915_private *i915;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index 416d85233263..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -16,7 +16,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
 
-- 
2.11.0
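
Below is a minimal, userspace-only C sketch of the ordering argument in the
commit message: on a strictly ordered timeline, whatever is tracked as the
last request already serialises everything queued after it, so a dedicated
barrier slot is redundant. The names struct request, struct timeline and
timeline_set_last() are illustrative stand-ins, not the i915 types or
i915_active_request_set(); they only model the chaining behaviour the patch
relies on.

/*
 * Minimal userspace sketch (not i915 code) of why a strictly ordered
 * timeline makes a separate barrier slot redundant.  All names here are
 * hypothetical stand-ins for the driver's request tracking.
 */
#include <stdio.h>
#include <stddef.h>

struct request {
	int seqno;
	struct request *waits_on;	/* request that must complete first */
};

struct timeline {
	struct request *last_request;	/* newest request on this timeline */
};

/*
 * Make @rq the serialisation point for the timeline: it first waits on
 * whatever was previously last, then becomes the new last_request.  Every
 * later submission chains to @rq, so @rq behaves exactly like an explicit
 * barrier would.
 */
static void timeline_set_last(struct timeline *tl, struct request *rq)
{
	rq->waits_on = tl->last_request;
	tl->last_request = rq;
}

static void show_chain(const struct request *rq)
{
	for (; rq; rq = rq->waits_on)
		printf("rq%d%s", rq->seqno, rq->waits_on ? " -> " : "\n");
}

int main(void)
{
	struct timeline tl = { .last_request = NULL };
	struct request a = { .seqno = 1 };
	struct request barrier = { .seqno = 2 };
	struct request b = { .seqno = 3 };

	timeline_set_last(&tl, &a);		/* ordinary request */
	timeline_set_last(&tl, &barrier);	/* the "barrier" request */
	timeline_set_last(&tl, &b);		/* anything queued afterwards */

	/* Prints rq3 -> rq2 -> rq1: later work already orders behind rq2. */
	show_chain(tl.last_request);
	return 0;
}

Running the sketch prints the dependency chain rq3 -> rq2 -> rq1, i.e. a
request submitted after the "barrier" request already waits behind it purely
through last_request, which is the property the consolidation depends on.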