From 7de1691a8b1c902bc4d10015e1dbe674ead49461 Mon Sep 17 00:00:00 2001
From: Tomas Elf
Date: Mon, 19 Oct 2015 16:32:32 +0100
Subject: [PATCH] drm/i915: Grab execlist spinlock to avoid post-reset concurrency issues.

Grab execlist lock when cleaning up execlist queues after GPU reset to
avoid concurrency problems between the context event interrupt handler
and the reset path immediately following a GPU reset.

* v2 (Chris Wilson):
Do execlist check and use simpler form of spinlock functions.

Signed-off-by: Tomas Elf
Reviewed-by: Dave Gordon
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0fa5481543c..9b2048c7077d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2753,18 +2753,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * are the ones that keep the context and ringbuffer backing objects
 	 * pinned in place.
 	 */
-	while (!list_empty(&ring->execlist_queue)) {
-		struct drm_i915_gem_request *submit_req;
 
-		submit_req = list_first_entry(&ring->execlist_queue,
-				struct drm_i915_gem_request,
-				execlist_link);
-		list_del(&submit_req->execlist_link);
+	if (i915.enable_execlists) {
+		spin_lock_irq(&ring->execlist_lock);
+		while (!list_empty(&ring->execlist_queue)) {
+			struct drm_i915_gem_request *submit_req;
 
-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(submit_req);
+			submit_req = list_first_entry(&ring->execlist_queue,
+					struct drm_i915_gem_request,
+					execlist_link);
+			list_del(&submit_req->execlist_link);
 
-		i915_gem_request_unreference(submit_req);
+			if (submit_req->ctx != ring->default_context)
+				intel_lr_context_unpin(submit_req);
+
+			i915_gem_request_unreference(submit_req);
+		}
+		spin_unlock_irq(&ring->execlist_lock);
 	}
 
 	/*
-- 
2.11.0
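
For readers who want the end state rather than the interleaved hunk, the cleanup block as it reads once this patch is applied is roughly the following. It is reconstructed from the '+' lines above; the rest of i915_gem_reset_ring_cleanup() is elided, and the annotating comment is added here for explanation, it is not part of the patch:

	if (i915.enable_execlists) {
		/* Hold execlist_lock so the context event interrupt handler,
		 * which works on the same queue, cannot run concurrently
		 * while the pending requests are torn down.
		 */
		spin_lock_irq(&ring->execlist_lock);
		while (!list_empty(&ring->execlist_queue)) {
			struct drm_i915_gem_request *submit_req;

			submit_req = list_first_entry(&ring->execlist_queue,
					struct drm_i915_gem_request,
					execlist_link);
			list_del(&submit_req->execlist_link);

			if (submit_req->ctx != ring->default_context)
				intel_lr_context_unpin(submit_req);

			i915_gem_request_unreference(submit_req);
		}
		spin_unlock_irq(&ring->execlist_lock);
	}

The v2 note's "simpler form of spinlock functions" refers to using spin_lock_irq()/spin_unlock_irq() rather than the irqsave/irqrestore variants: the reset path runs in process context, presumably with interrupts enabled, so saving and restoring the interrupt flags is not needed here.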