/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>

#include "i915_drv.h"
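/* Breadcrumbs: the waiter/signaler bottom-half machinery for i915.
 *
 * Waiters are kept in a per-engine rbtree ordered by seqno; the oldest
 * waiter acts as the interrupt bottom-half and wakes its peers. A
 * dedicated signaler kthread performs the dma-fence signaling, and a
 * fake-irq timer covers hardware that misses user interrupts.
 */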
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_enabled)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
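/* Lock nesting: callers of the helpers below hold the irq-safe b->lock;
 * the device-wide i915->irq_lock is only taken inside irq_enable() and
 * irq_disable(), with interrupts already disabled by the caller.
 */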
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		b->rpm_wakelock = true;
		return;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->fake_irq, jiffies + 1);
		i915_queue_hangcheck(i915);
	} else {
		/* Ensure we never sleep indefinitely */
		mod_timer(&b->hangcheck, wait_timeout());
	}
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (I915_SELFTEST_ONLY(b->mock)) {
		b->rpm_wakelock = false;
		return;
	}

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
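/* Tree invariants (checked by the GEM_BUG_ONs below): b->first_wait
 * always points at rb_first(&b->waiters), and b->irq_seqno_bh is the
 * task of that first waiter, i.e. the current interrupt bottom-half.
 */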
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need for the next bottom-half to wakeup.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (test_bit(ENGINE_IRQ_BREADCRUMB,
				     &engine->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}
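/* intel_engine_add_wait() queues @wait for the engine and returns true
 * if the waiter was added at the head of the queue (and so becomes the
 * new interrupt bottom-half responsible for the coherent seqno check).
 */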
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->lock);

	return first;
}
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
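/* task->prio is numerically smaller for more important tasks. When
 * removing the bottom-half, we only spend our own time waking a chain of
 * already-completed waiters if they are at least as important as we are;
 * wakeup_priority() maps the signaler to INT_MIN so that it never dallies
 * on chain wakeups and fence signaling stays prompt.
 */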
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->first_wait->tsk);
		} else {
			b->first_wait = NULL;
			rcu_assign_pointer(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock_irq(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is complete.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
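/* Run the signaler as a realtime task: SCHED_FIFO at the lowest RT
 * priority, so it preempts ordinary (CFS) tasks without competing with
 * other realtime work in the system.
 */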
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock_irq(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock_irq(&b->lock);

			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();

			if (kthread_should_park())
				kthread_parkme();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	assert_spin_locked(&request->lock);
	if (!request->global_seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->global_seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->global_seqno,
				      to_signaler(parent)->global_seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->lock);

	__intel_breadcrumbs_disable_irq(b);
	if (intel_engine_has_waiter(engine)) {
		__intel_breadcrumbs_enable_irq(b);
		if (test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted))
			wake_up_process(b->first_wait->tsk);
	} else {
		/* sanitize the IMR and unmask any auxiliary interrupts */
		irq_disable(engine);
	}

	spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->first_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(READ_ONCE(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
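/* intel_breadcrumbs_busy() pokes any outstanding waiters and signalers
 * and returns a mask of the engines that still have breadcrumb work
 * pending.
 */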
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	for_each_engine(engine, i915, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		spin_lock_irq(&b->lock);

		if (b->first_wait) {
			wake_up_process(b->first_wait->tsk);
			mask |= intel_engine_flag(engine);
		}

		if (b->first_signal) {
			wake_up_process(b->signaler);
			mask |= intel_engine_flag(engine);
		}

		spin_unlock_irq(&b->lock);
	}

	return mask;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif