From 7e9d3a4a1b21fd8a595774697f4047e7505deea6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:25 +0000
Subject: [PATCH] drm/i915: Wrap engine->schedule in RCU locks for set-wedge
 protection

Similar to the staging around handling of engine->submit_request, we
need to stop adding to the execlists->queue prior to calling
engine->cancel_requests. cancel_requests will move requests from the
queue onto the timeline, so if we add a request onto the queue after
that point, it will be lost.

Fixes: af7a8ffad9c5 ("drm/i915: Use rcu instead of stop_machine in set_wedged")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala
Reviewed-by: Mika Kuoppala
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-5-chris@chris-wilson.co.uk
(cherry picked from commit 47650db02dd52267953df81438c93cf8a0eb0e5e)
Signed-off-by: Joonas Lahtinen
---
 drivers/gpu/drm/i915/i915_gem.c     | 13 +++++++------
 drivers/gpu/drm/i915/i915_request.c |  2 ++
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46..8d913d833ab9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -471,10 +471,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
         rq = to_request(fence);
         engine = rq->engine;
-        if (!engine->schedule)
-                return;
 
-        engine->schedule(rq, prio);
+        rcu_read_lock();
+        if (engine->schedule)
+                engine->schedule(rq, prio);
+        rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -3214,8 +3215,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
          */
         for_each_engine(engine, i915, id) {
                 i915_gem_reset_prepare_engine(engine);
+
                 engine->submit_request = nop_submit_request;
+                engine->schedule = NULL;
         }
+        i915->caps.scheduler = 0;
 
         /*
          * Make sure no one is running the old callback before we proceed with
@@ -3233,11 +3237,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
                  * start to complete all requests.
                  */
                 engine->submit_request = nop_complete_submit_request;
-                engine->schedule = NULL;
         }
 
-        i915->caps.scheduler = 0;
-
         /*
          * Make sure no request can slip through without getting completed by
          * either this call here to intel_engine_init_global_seqno, or the one
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d437beac3969..282f57630cc1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
          * decide whether to preempt the entire chain so that it is ready to
          * run at the earliest possible convenience.
          */
+        rcu_read_lock();
         if (engine->schedule)
                 engine->schedule(request, request->ctx->priority);
+        rcu_read_unlock();
 
         local_bh_disable();
         i915_sw_fence_commit(&request->submit);
-- 
2.11.0
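
A minimal sketch (not part of the patch) of the ordering this change relies on,
using hypothetical toy_* stand-ins for the i915 structures; only
rcu_read_lock()/rcu_read_unlock()/synchronize_rcu() are the real kernel
primitives:

/* Sketch only: toy_engine/toy_request are illustrative stand-ins. */
#include <linux/rcupdate.h>

struct toy_request;

struct toy_engine {
        void (*schedule)(struct toy_request *rq, int prio);
        void (*cancel_requests)(struct toy_engine *engine);
};

/* Submission side, cf. __i915_request_add() / __fence_set_priority(). */
static void toy_schedule_request(struct toy_engine *engine,
                                 struct toy_request *rq, int prio)
{
        rcu_read_lock();
        if (engine->schedule)           /* may race with toy_set_wedged() */
                engine->schedule(rq, prio);
        rcu_read_unlock();
}

/* Wedging side, cf. i915_gem_set_wedged(). */
static void toy_set_wedged(struct toy_engine *engine)
{
        /* Unpublish the callback so no new work is queued via ->schedule. */
        engine->schedule = NULL;

        /*
         * Wait for every reader that saw the old callback to leave its RCU
         * read-side critical section; only then is it safe to cancel, since
         * nothing can add to the queue behind cancel_requests()' back.
         */
        synchronize_rcu();

        engine->cancel_requests(engine);
}

The point of the ordering is that ->schedule is cleared before
synchronize_rcu(), so by the time cancel_requests() moves requests from the
queue onto the timeline, no reader can still be adding to that queue.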