From: Chris Wilson
Date: Tue, 24 Apr 2018 08:16:00 +0000 (+0100)
Subject: drm/i915: Don't dump umpteen thousand requests
X-Git-Tag: v4.18-rc1~16^2~17^2~116
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=56021f48dbea69a00b96a53d6450b0950f9c811f;p=uclinux-h8%2Flinux.git

drm/i915: Don't dump umpteen thousand requests

If we have more than a few, possibly several thousand, requests in the
queue, don't show the central portion, just the first few and the last
being executed and/or queued. The first few should be enough to help
identify a problem in execution, and most often comparing the first/last
in the queue is enough to identify problems in the scheduling. We may
need some fine tuning to set MAX_REQUESTS_TO_SHOW for common debug
scenarios, but for the moment, if we can avoid spending more than a few
seconds dumping the GPU state, that will avoid a nasty livelock (where
hangcheck spends so long dumping the state that it fires again and
starts to dump the state again in parallel, ad infinitum).

v2: Remember to print last, not the stale rq iter, after the loop.

Signed-off-by: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20180424081600.27544-1-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 66cddd059666..2398ea71e747 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1307,11 +1307,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		       struct drm_printer *m,
 		       const char *header, ...)
 {
+	const int MAX_REQUESTS_TO_SHOW = 8;
 	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
 	const struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
-	struct i915_request *rq;
+	struct i915_request *rq, *last;
 	struct rb_node *rb;
+	int count;
 
 	if (header) {
 		va_list ap;
@@ -1378,16 +1380,47 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	}
 
 	spin_lock_irq(&engine->timeline->lock);
-	list_for_each_entry(rq, &engine->timeline->requests, link)
-		print_request(m, rq, "\t\tE ");
+
+	last = NULL;
+	count = 0;
+	list_for_each_entry(rq, &engine->timeline->requests, link) {
+		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+			print_request(m, rq, "\t\tE ");
+		else
+			last = rq;
+	}
+	if (last) {
+		if (count > MAX_REQUESTS_TO_SHOW) {
+			drm_printf(m,
+				   "\t\t...skipping %d executing requests...\n",
+				   count - MAX_REQUESTS_TO_SHOW);
+		}
+		print_request(m, last, "\t\tE ");
+	}
+
+	last = NULL;
+	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
 	for (rb = execlists->first; rb; rb = rb_next(rb)) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 
-		list_for_each_entry(rq, &p->requests, sched.link)
-			print_request(m, rq, "\t\tQ ");
+		list_for_each_entry(rq, &p->requests, sched.link) {
+			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+				print_request(m, rq, "\t\tQ ");
+			else
+				last = rq;
+		}
 	}
+	if (last) {
+		if (count > MAX_REQUESTS_TO_SHOW) {
+			drm_printf(m,
+				   "\t\t...skipping %d queued requests...\n",
+				   count - MAX_REQUESTS_TO_SHOW);
+		}
+		print_request(m, last, "\t\tQ ");
+	}
+
 	spin_unlock_irq(&engine->timeline->lock);
 
 	spin_lock_irq(&b->rb_lock);
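
For reference, the capping pattern used in both hunks can be seen in isolation in
the standalone sketch below. It is not part of the patch, and the names used here
(dump_capped, MAX_TO_SHOW, NENTRIES) are invented for illustration rather than taken
from the i915 code: print the first MAX_TO_SHOW - 1 entries, remember only the final
one, then emit a single "skipping" marker for everything in between.

/*
 * Standalone sketch of the capped-dump pattern, on a plain array rather
 * than the engine timeline / execlists request lists.
 */
#include <stdio.h>

#define MAX_TO_SHOW 8
#define NENTRIES 1000

static void dump_capped(const int *entries, int nentries)
{
	const int *last = NULL;
	int count = 0;
	int i;

	for (i = 0; i < nentries; i++) {
		if (count++ < MAX_TO_SHOW - 1)
			printf("\t\tE %d\n", entries[i]);
		else
			last = &entries[i]; /* remember only the final entry */
	}

	if (last) {
		if (count > MAX_TO_SHOW)
			printf("\t\t...skipping %d entries...\n",
			       count - MAX_TO_SHOW);
		printf("\t\tE %d\n", *last);
	}
}

int main(void)
{
	int entries[NENTRIES];
	int i;

	for (i = 0; i < NENTRIES; i++)
		entries[i] = i;

	/* Prints entries 0..6, a "skipping 992 entries" marker, then entry 999. */
	dump_capped(entries, NENTRIES);
	return 0;
}

With 1000 entries the sketch prints seven entries, one skip marker and the final
entry; if the list holds MAX_TO_SHOW entries or fewer, every entry is printed and
no marker appears, matching the behaviour of the kernel code above.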