2 * Copyright 2009 Jerome Glisse.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
28 * Jerome Glisse <glisse@freedesktop.org>
31 #include <linux/seq_file.h>
32 #include <linux/atomic.h>
33 #include <linux/wait.h>
34 #include <linux/kref.h>
35 #include <linux/slab.h>
36 #include <linux/firmware.h>
37 #include <linux/pm_runtime.h>
39 #include <drm/drm_drv.h>
41 #include "amdgpu_trace.h"
45 * Fences mark an event in the GPU's pipeline and are used
46 * for GPU/CPU synchronization. When the fence is written,
47 * it is expected that all buffers associated with that fence
48 * are no longer in use by the associated ring on the GPU and
49 * that the relevant GPU caches have been flushed.
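 *
 * Each ring owns a monotonically increasing 32-bit sequence number; a
 * fence is emitted with the next value and is considered signalled once
 * the value the GPU writes back has caught up with it.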
53 struct dma_fence base;
56 struct amdgpu_ring *ring;
59 static struct kmem_cache *amdgpu_fence_slab;
61 int amdgpu_fence_slab_init(void)
63 amdgpu_fence_slab = kmem_cache_create(
64 "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
65 SLAB_HWCACHE_ALIGN, NULL);
66 if (!amdgpu_fence_slab)
71 void amdgpu_fence_slab_fini(void)
74 kmem_cache_destroy(amdgpu_fence_slab);
79 static const struct dma_fence_ops amdgpu_fence_ops;
80 static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
82 struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
84 if (__f->base.ops == &amdgpu_fence_ops)
91 * amdgpu_fence_write - write a fence value
93 * @ring: ring the fence is associated with
94 * @seq: sequence number to write
96 * Writes a fence value to memory (all asics).
98 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
100 struct amdgpu_fence_driver *drv = &ring->fence_drv;
103 *drv->cpu_addr = cpu_to_le32(seq);
107 * amdgpu_fence_read - read a fence value
109 * @ring: ring the fence is associated with
111 * Reads a fence value from memory (all asics).
112 * Returns the value of the fence read from memory.
114 static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
116 struct amdgpu_fence_driver *drv = &ring->fence_drv;
120 seq = le32_to_cpu(*drv->cpu_addr);
122 seq = atomic_read(&drv->last_seq);
128 * amdgpu_fence_emit - emit a fence on the requested ring
130 * @ring: ring the fence is associated with
131 * @f: resulting fence object
132 * @job: job the fence is embedded in
133 * @flags: flags to pass into the subordinate .emit_fence() call
135 * Emits a fence command on the requested ring (all asics).
136 * Returns 0 on success, -ENOMEM on failure.
138 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
141 struct amdgpu_device *adev = ring->adev;
142 struct dma_fence *fence;
143 struct amdgpu_fence *am_fence;
144 struct dma_fence __rcu **ptr;
149 /* create a separate hw fence */
150 am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
151 if (am_fence == NULL)
153 fence = &am_fence->base;
154 am_fence->ring = ring;
156 /* use the job-embedded fence */
157 fence = &job->hw_fence;
160 seq = ++ring->fence_drv.sync_seq;
161 if (job != NULL && job->job_run_counter) {
162 /* reinit seq for resubmitted jobs */
165 dma_fence_init(fence, &amdgpu_fence_ops,
166 &ring->fence_drv.lock,
167 adev->fence_context + ring->idx,
172 /* mark that this fence has a parent job */
173 set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
176 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
177 seq, flags | AMDGPU_FENCE_FLAG_INT);
178 pm_runtime_get_noresume(adev_to_drm(adev)->dev);
179 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
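	/* The fence slots form a small ring (num_hw_submission * 2 entries);
	 * if the slot for this seq is still occupied, the GPU has fallen that
	 * far behind and the old fence must complete before the slot is reused.
	 */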
180 if (unlikely(rcu_dereference_protected(*ptr, 1))) {
181 struct dma_fence *old;
184 old = dma_fence_get_rcu_safe(ptr);
188 r = dma_fence_wait(old, false);
195 /* This function can't be called concurrently anyway, otherwise
196 * emitting the fence would mess up the hardware ring buffer.
198 rcu_assign_pointer(*ptr, dma_fence_get(fence));
206 * amdgpu_fence_emit_polling - emit a fence on the requested ring
208 * @ring: ring the fence is associated with
209 * @s: resulting sequence number
210 * @timeout: the timeout for waiting in usecs
212 * Emits a fence command on the requested ring (all asics).
213 * Used for fence polling.
214 * Returns 0 on success, negative error code on failure.
216 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
225 seq = ++ring->fence_drv.sync_seq;
226 r = amdgpu_fence_wait_polling(ring,
227 seq - ring->fence_drv.num_fences_mask,
232 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
241 * amdgpu_fence_schedule_fallback - schedule fallback check
243 * @ring: pointer to struct amdgpu_ring
245 * Start a timer as fallback to our interrupts.
247 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
249 mod_timer(&ring->fence_drv.fallback_timer,
250 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
254 * amdgpu_fence_process - check for fence activity
256 * @ring: pointer to struct amdgpu_ring
258 * Checks the current fence value and calculates the last
259 * signalled fence value. Wakes the fence queue if the
260 * sequence number has increased.
262 * Returns true if fence was processed
264 bool amdgpu_fence_process(struct amdgpu_ring *ring)
266 struct amdgpu_fence_driver *drv = &ring->fence_drv;
267 struct amdgpu_device *adev = ring->adev;
268 uint32_t seq, last_seq;
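	/* Atomically advance last_seq to the value read back from the GPU;
	 * the cmpxchg loop retries if another thread updated it concurrently.
	 */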
271 last_seq = atomic_read(&ring->fence_drv.last_seq);
272 seq = amdgpu_fence_read(ring);
274 } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
276 if (del_timer(&ring->fence_drv.fallback_timer) &&
277 seq != ring->fence_drv.sync_seq)
278 amdgpu_fence_schedule_fallback(ring);
280 if (unlikely(seq == last_seq))
283 last_seq &= drv->num_fences_mask;
284 seq &= drv->num_fences_mask;
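	/* Walk every slot between the previously signalled sequence number
	 * and the new one, signalling the fence stored in each.
	 */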
287 struct dma_fence *fence, **ptr;
290 last_seq &= drv->num_fences_mask;
291 ptr = &drv->fences[last_seq];
293 /* There is always exactly one thread signaling this fence slot */
294 fence = rcu_dereference_protected(*ptr, 1);
295 RCU_INIT_POINTER(*ptr, NULL);
300 dma_fence_signal(fence);
301 dma_fence_put(fence);
302 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
303 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
304 } while (last_seq != seq);
310 * amdgpu_fence_fallback - fallback for hardware interrupts
312 * @t: timer context used to obtain the pointer to ring structure
314 * Checks for fence activity.
316 static void amdgpu_fence_fallback(struct timer_list *t)
318 struct amdgpu_ring *ring = from_timer(ring, t,
319 fence_drv.fallback_timer);
321 if (amdgpu_fence_process(ring))
322 DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
326 * amdgpu_fence_wait_empty - wait for all fences to signal
328 * @ring: ring the fence is associated with
330 * Wait for all fences on the requested ring to signal (all asics).
331 * Returns 0 if the fences have passed, error for all other cases.
333 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
335 uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
336 struct dma_fence *fence, **ptr;
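	/* Fences on a ring signal in order, so waiting on the most recently
	 * emitted fence is enough to wait for all of them.
	 */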
342 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
344 fence = rcu_dereference(*ptr);
345 if (!fence || !dma_fence_get_rcu(fence)) {
351 r = dma_fence_wait(fence, false);
352 dma_fence_put(fence);
357 * amdgpu_fence_wait_polling - busy wait for a given sequence number
359 * @ring: ring the fence is associated with
360 * @wait_seq: sequence number to wait for
361 * @timeout: the timeout for waiting in usecs
363 * Busy-wait until the given sequence number has signalled on the requested ring (all asics).
364 * Returns the remaining time if the sequence number was reached, or 0 on timeout.
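 * The comparison is done on the signed 32-bit difference between the two
 * sequence numbers, so it keeps working across wrap-around.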
366 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
373 seq = amdgpu_fence_read(ring);
376 } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
378 return timeout > 0 ? timeout : 0;
381 * amdgpu_fence_count_emitted - get the count of emitted fences
383 * @ring: ring the fence is associated with
385 * Get the number of fences emitted on the requested ring (all asics).
386 * Returns the number of emitted fences on the ring. Used by the
387 * dynpm code to track ring activity.
389 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
393 /* We are not protected by ring lock when reading the last sequence
394 * but it's ok to report slightly wrong fence count here.
396 amdgpu_fence_process(ring);
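	/* emitted = sync_seq - last_seq modulo 2^32; the 2^32 bias keeps the
	 * intermediate unsigned 64-bit value from underflowing before the
	 * final lower_32_bits() truncation.
	 */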
397 emitted = 0x100000000ull;
398 emitted -= atomic_read(&ring->fence_drv.last_seq);
399 emitted += READ_ONCE(ring->fence_drv.sync_seq);
400 return lower_32_bits(emitted);
404 * amdgpu_fence_driver_start_ring - make the fence driver
405 * ready for use on the requested ring.
407 * @ring: ring to start the fence driver on
408 * @irq_src: interrupt source to use for this ring
409 * @irq_type: interrupt type to use for this ring
411 * Make the fence driver ready for processing (all asics).
412 * Not all asics have all rings, so each asic will only
413 * start the fence driver on the rings it has.
414 * Returns 0 for success, errors for failure.
416 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
417 struct amdgpu_irq_src *irq_src,
420 struct amdgpu_device *adev = ring->adev;
423 if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
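		/* fence values live in the ring's slot of the GPU writeback area */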
424 ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
425 ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
427 /* put fence directly behind firmware */
428 index = ALIGN(adev->uvd.fw->size, 8);
429 ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
430 ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
432 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
434 ring->fence_drv.irq_src = irq_src;
435 ring->fence_drv.irq_type = irq_type;
436 ring->fence_drv.initialized = true;
438 DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
439 ring->name, ring->fence_drv.gpu_addr);
444 * amdgpu_fence_driver_init_ring - init the fence driver
445 * for the requested ring.
447 * @ring: ring to init the fence driver on
448 * @num_hw_submission: number of entries on the hardware queue
449 * @sched_score: optional score atomic shared with other schedulers
451 * Init the fence driver for the requested ring (all asics).
452 * Helper function for amdgpu_fence_driver_init().
454 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
455 unsigned num_hw_submission,
456 atomic_t *sched_score)
458 struct amdgpu_device *adev = ring->adev;
465 if (!is_power_of_2(num_hw_submission))
468 ring->fence_drv.cpu_addr = NULL;
469 ring->fence_drv.gpu_addr = 0;
470 ring->fence_drv.sync_seq = 0;
471 atomic_set(&ring->fence_drv.last_seq, 0);
472 ring->fence_drv.initialized = false;
474 timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
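	/* Twice the queue depth of fence slots are allocated; the count is a
	 * power of two so a sequence number can be masked into a slot index.
	 */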
476 ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
477 spin_lock_init(&ring->fence_drv.lock);
478 ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
480 if (!ring->fence_drv.fences)
483 /* No need to set up the GPU scheduler for rings that don't need it */
484 if (ring->no_scheduler)
487 switch (ring->funcs->type) {
488 case AMDGPU_RING_TYPE_GFX:
489 timeout = adev->gfx_timeout;
491 case AMDGPU_RING_TYPE_COMPUTE:
492 timeout = adev->compute_timeout;
494 case AMDGPU_RING_TYPE_SDMA:
495 timeout = adev->sdma_timeout;
498 timeout = adev->video_timeout;
502 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
503 num_hw_submission, amdgpu_job_hang_limit,
504 timeout, NULL, sched_score, ring->name);
506 DRM_ERROR("Failed to create scheduler on ring %s.\n",
515 * amdgpu_fence_driver_sw_init - init the fence driver
516 * for all possible rings.
518 * @adev: amdgpu device pointer
520 * Init the fence driver for all possible rings (all asics).
521 * Not all asics have all rings, so each asic will only
522 * start the fence driver on the rings it has using
523 * amdgpu_fence_driver_start_ring().
524 * Returns 0 for success.
526 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
532 * amdgpu_fence_driver_hw_fini - tear down the fence driver
533 * for all possible rings.
535 * @adev: amdgpu device pointer
537 * Tear down the fence driver for all possible rings (all asics).
539 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
543 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
544 struct amdgpu_ring *ring = adev->rings[i];
546 if (!ring || !ring->fence_drv.initialized)
549 if (!ring->no_scheduler)
550 drm_sched_stop(&ring->sched, NULL);
552 /* You can't wait for HW to signal if it's gone */
553 if (!drm_dev_is_unplugged(adev_to_drm(adev)))
554 r = amdgpu_fence_wait_empty(ring);
557 /* no need to trigger GPU reset as we are unloading */
559 amdgpu_fence_driver_force_completion(ring);
561 if (ring->fence_drv.irq_src)
562 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
563 ring->fence_drv.irq_type);
565 del_timer_sync(&ring->fence_drv.fallback_timer);
569 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
573 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
574 struct amdgpu_ring *ring = adev->rings[i];
576 if (!ring || !ring->fence_drv.initialized)
579 if (!ring->no_scheduler)
580 drm_sched_fini(&ring->sched);
582 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
583 dma_fence_put(ring->fence_drv.fences[j]);
584 kfree(ring->fence_drv.fences);
585 ring->fence_drv.fences = NULL;
586 ring->fence_drv.initialized = false;
591 * amdgpu_fence_driver_hw_init - enable the fence driver
592 * for all possible rings.
594 * @adev: amdgpu device pointer
596 * Enable the fence driver for all possible rings (all asics).
597 * Not all asics have all rings, so each asic will only
598 * start the fence driver on the rings it has using
599 * amdgpu_fence_driver_start_ring().
602 void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
606 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
607 struct amdgpu_ring *ring = adev->rings[i];
608 if (!ring || !ring->fence_drv.initialized)
611 if (!ring->no_scheduler) {
612 drm_sched_resubmit_jobs(&ring->sched);
613 drm_sched_start(&ring->sched, true);
616 /* enable the interrupt */
617 if (ring->fence_drv.irq_src)
618 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
619 ring->fence_drv.irq_type);
624 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
626 * @ring: ring to force fence completion on
629 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
631 amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
632 amdgpu_fence_process(ring);
636 * Common fence implementation
639 static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
644 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
646 struct amdgpu_ring *ring;
648 if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
649 struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
651 ring = to_amdgpu_ring(job->base.sched);
653 ring = to_amdgpu_fence(f)->ring;
655 return (const char *)ring->name;
659 * amdgpu_fence_enable_signaling - enable signalling on fence
662 * This function is called with the fence lock held; it arms the
663 * fallback timer if it is not already pending, so the fence is
664 * processed and signalled even if the fence interrupt is missed.
666 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
668 struct amdgpu_ring *ring;
670 if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
671 struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
673 ring = to_amdgpu_ring(job->base.sched);
675 ring = to_amdgpu_fence(f)->ring;
678 if (!timer_pending(&ring->fence_drv.fallback_timer))
679 amdgpu_fence_schedule_fallback(ring);
685 * amdgpu_fence_free - free up the fence memory
687 * @rcu: RCU callback head
689 * Free up the fence memory after the RCU grace period.
691 static void amdgpu_fence_free(struct rcu_head *rcu)
693 struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
695 if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
696 /* free the job if the fence is embedded in a job */
697 struct amdgpu_job *job;
699 job = container_of(f, struct amdgpu_job, hw_fence);
702 /* free the slab allocation if it's a separate fence */
703 struct amdgpu_fence *fence;
705 fence = to_amdgpu_fence(f);
706 kmem_cache_free(amdgpu_fence_slab, fence);
711 * amdgpu_fence_release - callback that fence can be freed
715 * This function is called when the reference count becomes zero.
716 * It just RCU schedules freeing up the fence.
718 static void amdgpu_fence_release(struct dma_fence *f)
720 call_rcu(&f->rcu, amdgpu_fence_free);
723 static const struct dma_fence_ops amdgpu_fence_ops = {
724 .get_driver_name = amdgpu_fence_get_driver_name,
725 .get_timeline_name = amdgpu_fence_get_timeline_name,
726 .enable_signaling = amdgpu_fence_enable_signaling,
727 .release = amdgpu_fence_release,
734 #if defined(CONFIG_DEBUG_FS)
735 static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
737 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
740 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
741 struct amdgpu_ring *ring = adev->rings[i];
742 if (!ring || !ring->fence_drv.initialized)
745 amdgpu_fence_process(ring);
747 seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
748 seq_printf(m, "Last signaled fence 0x%08x\n",
749 atomic_read(&ring->fence_drv.last_seq));
750 seq_printf(m, "Last emitted 0x%08x\n",
751 ring->fence_drv.sync_seq);
753 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
754 ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
755 seq_printf(m, "Last signaled trailing fence 0x%08x\n",
756 le32_to_cpu(*ring->trail_fence_cpu_addr));
757 seq_printf(m, "Last emitted 0x%08x\n",
761 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
764 /* set in CP_VMID_PREEMPT and preemption occurred */
765 seq_printf(m, "Last preempted 0x%08x\n",
766 le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
767 /* set in CP_VMID_RESET and reset occurred */
768 seq_printf(m, "Last reset 0x%08x\n",
769 le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
770 /* Both preemption and reset occurred */
771 seq_printf(m, "Last both 0x%08x\n",
772 le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
778 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
780 * Manually trigger a GPU reset and recovery when this debugfs file is read.
782 static int gpu_recover_get(void *data, u64 *val)
784 struct amdgpu_device *adev = (struct amdgpu_device *)data;
785 struct drm_device *dev = adev_to_drm(adev);
788 r = pm_runtime_get_sync(dev->dev);
790 pm_runtime_put_autosuspend(dev->dev);
794 *val = amdgpu_device_gpu_recover(adev, NULL);
796 pm_runtime_mark_last_busy(dev->dev);
797 pm_runtime_put_autosuspend(dev->dev);
802 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
803 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
808 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
810 #if defined(CONFIG_DEBUG_FS)
811 struct drm_minor *minor = adev_to_drm(adev)->primary;
812 struct dentry *root = minor->debugfs_root;
814 debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
815 &amdgpu_debugfs_fence_info_fops);
817 if (!amdgpu_sriov_vf(adev))
818 debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
819 &amdgpu_debugfs_gpu_recover_fops);