1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
58 #define ETIME ETIMEDOUT
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
64 #include "intel_aub.h"
77 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
86 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
88 struct drm_intel_gem_bo_bucket {
93 typedef struct _drm_intel_bufmgr_gem {
94 drm_intel_bufmgr bufmgr;
100 pthread_mutex_t lock;
102 struct drm_i915_gem_exec_object *exec_objects;
103 struct drm_i915_gem_exec_object2 *exec2_objects;
104 drm_intel_bo **exec_bos;
108 /** Array of lists of cached gem objects of power-of-two sizes */
109 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
114 drmMMListHead vma_cache;
115 int vma_count, vma_open, vma_max;
118 int available_fences;
121 unsigned int has_bsd : 1;
122 unsigned int has_blt : 1;
123 unsigned int has_relaxed_fencing : 1;
124 unsigned int has_llc : 1;
125 unsigned int has_wait_timeout : 1;
126 unsigned int bo_reuse : 1;
127 unsigned int no_exec : 1;
128 unsigned int has_vebox : 1;
134 } drm_intel_bufmgr_gem;
136 #define DRM_INTEL_RELOC_FENCE (1<<0)
138 typedef struct _drm_intel_reloc_target_info {
141 } drm_intel_reloc_target;
143 struct _drm_intel_bo_gem {
151 * Kernel-assigned global name for this object
153 * List contains both flink named and prime fd'd objects
155 unsigned int global_name;
156 drmMMListHead name_list;
159 * Index of the buffer within the validation list while preparing a
160 * batchbuffer execution.
165 * Current tiling mode
167 uint32_t tiling_mode;
168 uint32_t swizzle_mode;
169 unsigned long stride;
173 /** Array passed to the DRM containing relocation information. */
174 struct drm_i915_gem_relocation_entry *relocs;
176 * Array of info structs corresponding to relocs[i].target_handle etc
178 drm_intel_reloc_target *reloc_target_info;
179 /** Number of entries in relocs */
181 /** Mapped address for the buffer, saved across map/unmap cycles */
183 /** GTT virtual address for the buffer, saved across map/unmap cycles */
186 drmMMListHead vma_list;
192 * Boolean of whether this BO and its children have been included in
193 * the current drm_intel_bufmgr_check_aperture_space() total.
195 bool included_in_check_aperture;
198 * Boolean of whether this buffer has been used as a relocation
199 * target and had its size accounted for, and thus can't have any
200 * further relocations added to it.
202 bool used_as_reloc_target;
205 * Boolean of whether we have encountered an error whilst building the relocation tree.
210 * Boolean of whether this buffer can be re-used
215 * Boolean of whether the GPU is definitely not accessing the buffer.
217 * This is only valid when reusable, since non-reusable
218 * buffers are those that have been shared with other
219 * processes, so we don't know their state.
224 * Size in bytes of this buffer and its relocation descendants.
226 * Used to avoid costly tree walking in
227 * drm_intel_bufmgr_check_aperture in the common case.
232 * Number of potential fence registers required by this buffer and its
235 int reloc_tree_fences;
237 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
238 bool mapped_cpu_write;
242 drm_intel_aub_annotation *aub_annotations;
243 unsigned aub_annotation_count;
247 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
250 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
253 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
254 uint32_t * swizzle_mode);
257 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
258 uint32_t tiling_mode,
261 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
264 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
266 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
269 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
270 uint32_t *tiling_mode)
272 unsigned long min_size, max_size;
275 if (*tiling_mode == I915_TILING_NONE)
278 /* 965+ just need multiples of page size for tiling */
279 if (bufmgr_gem->gen >= 4)
280 return ROUND_UP_TO(size, 4096);
282 /* Older chips need powers of two, of at least 512k or 1M */
283 if (bufmgr_gem->gen == 3) {
284 min_size = 1024*1024;
285 max_size = 128*1024*1024;
288 max_size = 64*1024*1024;
291 if (size > max_size) {
292 *tiling_mode = I915_TILING_NONE;
296 /* Do we need to allocate every page for the fence? */
297 if (bufmgr_gem->has_relaxed_fencing)
298 return ROUND_UP_TO(size, 4096);
300 for (i = min_size; i < size; i <<= 1)
307 * Round a given pitch up to the minimum required for X tiling on a
308 * given chip. We use 512 as the minimum to allow for a later tiling
312 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
313 unsigned long pitch, uint32_t *tiling_mode)
315 unsigned long tile_width;
318 /* If untiled, then just align it so that we can do rendering
319 * to it with the 3D engine.
321 if (*tiling_mode == I915_TILING_NONE)
322 return ALIGN(pitch, 64);
324 if (*tiling_mode == I915_TILING_X
325 || (IS_915(bufmgr_gem->pci_device)
326 && *tiling_mode == I915_TILING_Y))
331 /* 965 is flexible */
332 if (bufmgr_gem->gen >= 4)
333 return ROUND_UP_TO(pitch, tile_width);
335 /* The older hardware has a maximum pitch of 8192 with tiled
336 * surfaces, so fall back to untiled if it's too large.
339 *tiling_mode = I915_TILING_NONE;
340 return ALIGN(pitch, 64);
343 /* Pre-965 needs power of two tile width */
344 for (i = tile_width; i < pitch; i <<= 1)
350 static struct drm_intel_gem_bo_bucket *
351 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
356 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
357 struct drm_intel_gem_bo_bucket *bucket =
358 &bufmgr_gem->cache_bucket[i];
359 if (bucket->size >= size) {
368 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
372 for (i = 0; i < bufmgr_gem->exec_count; i++) {
373 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
374 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
376 if (bo_gem->relocs == NULL) {
377 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
382 for (j = 0; j < bo_gem->reloc_count; j++) {
383 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
384 drm_intel_bo_gem *target_gem =
385 (drm_intel_bo_gem *) target_bo;
387 DBG("%2d: %d (%s)@0x%08llx -> "
388 "%d (%s)@0x%08lx + 0x%08x\n",
390 bo_gem->gem_handle, bo_gem->name,
391 (unsigned long long)bo_gem->relocs[j].offset,
392 target_gem->gem_handle,
395 bo_gem->relocs[j].delta);
401 drm_intel_gem_bo_reference(drm_intel_bo *bo)
403 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
405 atomic_inc(&bo_gem->refcount);
409 * Adds the given buffer to the list of buffers to be validated (moved into the
410 * appropriate memory type) with the next batch submission.
412 * If a buffer is validated multiple times in a batch submission, it ends up
413 * with the intersection of the memory type flags and the union of the
417 drm_intel_add_validate_buffer(drm_intel_bo *bo)
419 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
420 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
423 if (bo_gem->validate_index != -1)
426 /* Extend the array of validation entries as necessary. */
427 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
428 int new_size = bufmgr_gem->exec_size * 2;
433 bufmgr_gem->exec_objects =
434 realloc(bufmgr_gem->exec_objects,
435 sizeof(*bufmgr_gem->exec_objects) * new_size);
436 bufmgr_gem->exec_bos =
437 realloc(bufmgr_gem->exec_bos,
438 sizeof(*bufmgr_gem->exec_bos) * new_size);
439 bufmgr_gem->exec_size = new_size;
442 index = bufmgr_gem->exec_count;
443 bo_gem->validate_index = index;
444 /* Fill in array entry */
445 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
446 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
447 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
448 bufmgr_gem->exec_objects[index].alignment = 0;
449 bufmgr_gem->exec_objects[index].offset = 0;
450 bufmgr_gem->exec_bos[index] = bo;
451 bufmgr_gem->exec_count++;
455 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
457 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
458 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
461 if (bo_gem->validate_index != -1) {
463 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
464 EXEC_OBJECT_NEEDS_FENCE;
468 /* Extend the array of validation entries as necessary. */
469 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
470 int new_size = bufmgr_gem->exec_size * 2;
475 bufmgr_gem->exec2_objects =
476 realloc(bufmgr_gem->exec2_objects,
477 sizeof(*bufmgr_gem->exec2_objects) * new_size);
478 bufmgr_gem->exec_bos =
479 realloc(bufmgr_gem->exec_bos,
480 sizeof(*bufmgr_gem->exec_bos) * new_size);
481 bufmgr_gem->exec_size = new_size;
484 index = bufmgr_gem->exec_count;
485 bo_gem->validate_index = index;
486 /* Fill in array entry */
487 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
488 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
489 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
490 bufmgr_gem->exec2_objects[index].alignment = 0;
491 bufmgr_gem->exec2_objects[index].offset = 0;
492 bufmgr_gem->exec_bos[index] = bo;
493 bufmgr_gem->exec2_objects[index].flags = 0;
494 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
495 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
497 bufmgr_gem->exec2_objects[index].flags |=
498 EXEC_OBJECT_NEEDS_FENCE;
500 bufmgr_gem->exec_count++;
503 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
507 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
508 drm_intel_bo_gem *bo_gem)
512 assert(!bo_gem->used_as_reloc_target);
514 /* The older chipsets are far less flexible in terms of tiling,
515 * and require tiled buffers to be size-aligned in the aperture.
516 * This means that in the worst possible case we will need a hole
517 * twice as large as the object in order for it to fit into the
518 * aperture. Optimal packing is for wimps.
520 size = bo_gem->bo.size;
521 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
524 if (bufmgr_gem->has_relaxed_fencing) {
525 if (bufmgr_gem->gen == 3)
526 min_size = 1024*1024;
530 while (min_size < size)
535 /* Account for worst-case alignment. */
539 bo_gem->reloc_tree_size = size;
543 drm_intel_setup_reloc_list(drm_intel_bo *bo)
545 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
546 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
547 unsigned int max_relocs = bufmgr_gem->max_relocs;
549 if (bo->size / 4 < max_relocs)
550 max_relocs = bo->size / 4;
552 bo_gem->relocs = malloc(max_relocs *
553 sizeof(struct drm_i915_gem_relocation_entry));
554 bo_gem->reloc_target_info = malloc(max_relocs *
555 sizeof(drm_intel_reloc_target));
556 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
557 bo_gem->has_error = true;
559 free (bo_gem->relocs);
560 bo_gem->relocs = NULL;
562 free (bo_gem->reloc_target_info);
563 bo_gem->reloc_target_info = NULL;
572 drm_intel_gem_bo_busy(drm_intel_bo *bo)
574 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
575 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
576 struct drm_i915_gem_busy busy;
579 if (bo_gem->reusable && bo_gem->idle)
583 busy.handle = bo_gem->gem_handle;
585 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
587 bo_gem->idle = !busy.busy;
592 return (ret == 0 && busy.busy);
596 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
597 drm_intel_bo_gem *bo_gem, int state)
599 struct drm_i915_gem_madvise madv;
602 madv.handle = bo_gem->gem_handle;
605 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
607 return madv.retained;
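/*
 * Illustrative sketch (not compiled into the library): how a caller might
 * use the public drm_intel_gem_bo_madvise() wrapper below to mark an idle
 * buffer purgeable and later check whether its pages survived. The helper
 * name is hypothetical.
 */
#if 0
static void example_madvise_usage(drm_intel_bo *bo)
{
	/* Allow the kernel to reclaim the backing pages under memory pressure. */
	drm_intel_gem_bo_madvise(bo, I915_MADV_DONTNEED);

	/* Before touching the buffer again, ask for the pages back. A zero
	 * return means they were purged and the contents must be re-uploaded.
	 */
	if (!drm_intel_gem_bo_madvise(bo, I915_MADV_WILLNEED)) {
		/* ... regenerate or re-upload the buffer contents ... */
	}
}
#endif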
611 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
613 return drm_intel_gem_bo_madvise_internal
614 ((drm_intel_bufmgr_gem *) bo->bufmgr,
615 (drm_intel_bo_gem *) bo,
619 /* drop the oldest entries that have been purged by the kernel */
621 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
622 struct drm_intel_gem_bo_bucket *bucket)
624 while (!DRMLISTEMPTY(&bucket->head)) {
625 drm_intel_bo_gem *bo_gem;
627 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
628 bucket->head.next, head);
629 if (drm_intel_gem_bo_madvise_internal
630 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
633 DRMLISTDEL(&bo_gem->head);
634 drm_intel_gem_bo_free(&bo_gem->bo);
638 static drm_intel_bo *
639 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
643 uint32_t tiling_mode,
644 unsigned long stride)
646 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
647 drm_intel_bo_gem *bo_gem;
648 unsigned int page_size = getpagesize();
650 struct drm_intel_gem_bo_bucket *bucket;
651 bool alloc_from_cache;
652 unsigned long bo_size;
653 bool for_render = false;
655 if (flags & BO_ALLOC_FOR_RENDER)
658 /* Round the allocated size up to a power of two number of pages. */
659 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
661 /* If we don't have caching at this size, don't actually round the
664 if (bucket == NULL) {
666 if (bo_size < page_size)
669 bo_size = bucket->size;
672 pthread_mutex_lock(&bufmgr_gem->lock);
673 /* Get a buffer out of the cache if available */
675 alloc_from_cache = false;
676 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
678 /* Allocate new render-target BOs from the tail (MRU)
679 * of the list, as it will likely be hot in the GPU
680 * cache and in the aperture for us.
682 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
683 bucket->head.prev, head);
684 DRMLISTDEL(&bo_gem->head);
685 alloc_from_cache = true;
687 /* For non-render-target BOs (where we're probably
688 * going to map it first thing in order to fill it
689 * with data), check if the last BO in the cache is
690 * unbusy, and only reuse in that case. Otherwise,
691 * allocating a new buffer is probably faster than
692 * waiting for the GPU to finish.
694 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
695 bucket->head.next, head);
696 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
697 alloc_from_cache = true;
698 DRMLISTDEL(&bo_gem->head);
702 if (alloc_from_cache) {
703 if (!drm_intel_gem_bo_madvise_internal
704 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
705 drm_intel_gem_bo_free(&bo_gem->bo);
706 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
711 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
714 drm_intel_gem_bo_free(&bo_gem->bo);
719 pthread_mutex_unlock(&bufmgr_gem->lock);
721 if (!alloc_from_cache) {
722 struct drm_i915_gem_create create;
724 bo_gem = calloc(1, sizeof(*bo_gem));
728 bo_gem->bo.size = bo_size;
731 create.size = bo_size;
733 ret = drmIoctl(bufmgr_gem->fd,
734 DRM_IOCTL_I915_GEM_CREATE,
736 bo_gem->gem_handle = create.handle;
737 bo_gem->bo.handle = bo_gem->gem_handle;
742 bo_gem->bo.bufmgr = bufmgr;
744 bo_gem->tiling_mode = I915_TILING_NONE;
745 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
748 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
751 drm_intel_gem_bo_free(&bo_gem->bo);
755 DRMINITLISTHEAD(&bo_gem->name_list);
756 DRMINITLISTHEAD(&bo_gem->vma_list);
760 atomic_set(&bo_gem->refcount, 1);
761 bo_gem->validate_index = -1;
762 bo_gem->reloc_tree_fences = 0;
763 bo_gem->used_as_reloc_target = false;
764 bo_gem->has_error = false;
765 bo_gem->reusable = true;
766 bo_gem->aub_annotations = NULL;
767 bo_gem->aub_annotation_count = 0;
769 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
771 DBG("bo_create: buf %d (%s) %ldb\n",
772 bo_gem->gem_handle, bo_gem->name, size);
777 static drm_intel_bo *
778 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
781 unsigned int alignment)
783 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
785 I915_TILING_NONE, 0);
788 static drm_intel_bo *
789 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
792 unsigned int alignment)
794 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
795 I915_TILING_NONE, 0);
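/*
 * Illustrative sketch (not compiled): allocating a linear buffer and an
 * X-tiled surface through the public entry points. The example_alloc()
 * helper, names and sizes are hypothetical; note that *tiling_mode and
 * *pitch are in/out parameters that report what was actually chosen.
 */
#if 0
static void example_alloc(drm_intel_bufmgr *bufmgr)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch = 0;
	drm_intel_bo *vbo, *surface;

	/* 64kB linear buffer, e.g. for vertex data. */
	vbo = drm_intel_bo_alloc(bufmgr, "vertices", 64 * 1024, 4096);

	/* 1024x768, 4 bytes per pixel; tiling may be demoted to I915_TILING_NONE. */
	surface = drm_intel_bo_alloc_tiled(bufmgr, "surface", 1024, 768, 4,
					   &tiling, &pitch, 0);

	drm_intel_bo_unreference(vbo);
	drm_intel_bo_unreference(surface);
}
#endif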
798 static drm_intel_bo *
799 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
800 int x, int y, int cpp, uint32_t *tiling_mode,
801 unsigned long *pitch, unsigned long flags)
803 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
804 unsigned long size, stride;
808 unsigned long aligned_y, height_alignment;
810 tiling = *tiling_mode;
812 /* If we're tiled, our allocations are in 8 or 32-row blocks,
813 * so failure to align our height means that we won't allocate
816 * If we're untiled, we still have to align to 2 rows high
817 * because the data port accesses 2x2 blocks even if the
818 * bottom row isn't to be rendered, so failure to align means
819 * we could walk off the end of the GTT and fault. This is
820 * documented on 965, and may be the case on older chipsets
821 * too so we try to be careful.
824 height_alignment = 2;
826 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
827 height_alignment = 16;
828 else if (tiling == I915_TILING_X
829 || (IS_915(bufmgr_gem->pci_device)
830 && tiling == I915_TILING_Y))
831 height_alignment = 8;
832 else if (tiling == I915_TILING_Y)
833 height_alignment = 32;
834 aligned_y = ALIGN(y, height_alignment);
837 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
838 size = stride * aligned_y;
839 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
840 } while (*tiling_mode != tiling);
843 if (tiling == I915_TILING_NONE)
846 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
851 * Returns a drm_intel_bo wrapping the given buffer object handle.
853 * This can be used when one application needs to pass a buffer object
857 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
861 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
862 drm_intel_bo_gem *bo_gem;
864 struct drm_gem_open open_arg;
865 struct drm_i915_gem_get_tiling get_tiling;
868 /* At the moment most applications only have a few named BOs.
869 * For instance, in a DRI client only the render buffers passed
870 * between X and the client are named. And since X returns the
871 * alternating names for the front/back buffer a linear search
872 * provides a sufficiently fast match.
874 for (list = bufmgr_gem->named.next;
875 list != &bufmgr_gem->named;
877 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
878 if (bo_gem->global_name == handle) {
879 drm_intel_gem_bo_reference(&bo_gem->bo);
885 open_arg.name = handle;
886 ret = drmIoctl(bufmgr_gem->fd,
890 DBG("Couldn't reference %s handle 0x%08x: %s\n",
891 name, handle, strerror(errno));
894 /* Now see if someone has used a prime handle to get this
895 * object from the kernel before by looking through the list
896 * again for a matching gem_handle
898 for (list = bufmgr_gem->named.next;
899 list != &bufmgr_gem->named;
901 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
902 if (bo_gem->gem_handle == open_arg.handle) {
903 drm_intel_gem_bo_reference(&bo_gem->bo);
908 bo_gem = calloc(1, sizeof(*bo_gem));
912 bo_gem->bo.size = open_arg.size;
913 bo_gem->bo.offset = 0;
914 bo_gem->bo.virtual = NULL;
915 bo_gem->bo.bufmgr = bufmgr;
917 atomic_set(&bo_gem->refcount, 1);
918 bo_gem->validate_index = -1;
919 bo_gem->gem_handle = open_arg.handle;
920 bo_gem->bo.handle = open_arg.handle;
921 bo_gem->global_name = handle;
922 bo_gem->reusable = false;
924 VG_CLEAR(get_tiling);
925 get_tiling.handle = bo_gem->gem_handle;
926 ret = drmIoctl(bufmgr_gem->fd,
927 DRM_IOCTL_I915_GEM_GET_TILING,
930 drm_intel_gem_bo_unreference(&bo_gem->bo);
933 bo_gem->tiling_mode = get_tiling.tiling_mode;
934 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
935 /* XXX stride is unknown */
936 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
938 DRMINITLISTHEAD(&bo_gem->vma_list);
939 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
940 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
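/*
 * Illustrative sketch (not compiled): sharing a buffer between two
 * processes with a flink name. The exporter obtains a global name with
 * drm_intel_bo_flink(); the importer opens it with
 * drm_intel_bo_gem_create_from_name() as implemented above. The helper
 * names and the transport of the name between processes are hypothetical.
 */
#if 0
static uint32_t example_export(drm_intel_bo *bo)
{
	uint32_t name = 0;

	/* Publish a global (flink) name for this BO. */
	drm_intel_bo_flink(bo, &name);
	return name;	/* e.g. handed to the other process over IPC */
}

static drm_intel_bo *example_import(drm_intel_bufmgr *bufmgr, uint32_t name)
{
	/* Wrap the named object; repeated imports return the same bo. */
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}
#endif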
946 drm_intel_gem_bo_free(drm_intel_bo *bo)
948 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
949 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
950 struct drm_gem_close close;
953 DRMLISTDEL(&bo_gem->vma_list);
954 if (bo_gem->mem_virtual) {
955 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
956 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
957 bufmgr_gem->vma_count--;
959 if (bo_gem->gtt_virtual) {
960 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
961 bufmgr_gem->vma_count--;
964 /* Close this object */
966 close.handle = bo_gem->gem_handle;
967 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
969 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
970 bo_gem->gem_handle, bo_gem->name, strerror(errno));
972 free(bo_gem->aub_annotations);
977 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
980 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
982 if (bo_gem->mem_virtual)
983 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
985 if (bo_gem->gtt_virtual)
986 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
990 /** Frees all cached buffers significantly older than @time. */
992 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
996 if (bufmgr_gem->time == time)
999 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1000 struct drm_intel_gem_bo_bucket *bucket =
1001 &bufmgr_gem->cache_bucket[i];
1003 while (!DRMLISTEMPTY(&bucket->head)) {
1004 drm_intel_bo_gem *bo_gem;
1006 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1007 bucket->head.next, head);
1008 if (time - bo_gem->free_time <= 1)
1011 DRMLISTDEL(&bo_gem->head);
1013 drm_intel_gem_bo_free(&bo_gem->bo);
1017 bufmgr_gem->time = time;
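/*
 * Illustrative sketch (not compiled): the bucket cache above only retains
 * freed buffers once reuse has been enabled on the manager. A typical
 * driver sets this up right after creating the bufmgr; the fd and batch
 * size below are placeholders.
 */
#if 0
static drm_intel_bufmgr *example_bufmgr_init(int fd)
{
	drm_intel_bufmgr *bufmgr;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096 /* batch size */);
	if (bufmgr == NULL)
		return NULL;

	/* Let freed BOs be cached and handed back by later allocations. */
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	return bufmgr;
}
#endif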
1020 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1024 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1025 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1027 if (bufmgr_gem->vma_max < 0)
1030 /* We may need to evict a few entries in order to create new mmaps */
1031 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1035 while (bufmgr_gem->vma_count > limit) {
1036 drm_intel_bo_gem *bo_gem;
1038 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1039 bufmgr_gem->vma_cache.next,
1041 assert(bo_gem->map_count == 0);
1042 DRMLISTDELINIT(&bo_gem->vma_list);
1044 if (bo_gem->mem_virtual) {
1045 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1046 bo_gem->mem_virtual = NULL;
1047 bufmgr_gem->vma_count--;
1049 if (bo_gem->gtt_virtual) {
1050 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1051 bo_gem->gtt_virtual = NULL;
1052 bufmgr_gem->vma_count--;
1057 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1058 drm_intel_bo_gem *bo_gem)
1060 bufmgr_gem->vma_open--;
1061 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1062 if (bo_gem->mem_virtual)
1063 bufmgr_gem->vma_count++;
1064 if (bo_gem->gtt_virtual)
1065 bufmgr_gem->vma_count++;
1066 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1069 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1070 drm_intel_bo_gem *bo_gem)
1072 bufmgr_gem->vma_open++;
1073 DRMLISTDEL(&bo_gem->vma_list);
1074 if (bo_gem->mem_virtual)
1075 bufmgr_gem->vma_count--;
1076 if (bo_gem->gtt_virtual)
1077 bufmgr_gem->vma_count--;
1078 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1082 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1084 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1085 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1086 struct drm_intel_gem_bo_bucket *bucket;
1089 /* Unreference all the target buffers */
1090 for (i = 0; i < bo_gem->reloc_count; i++) {
1091 if (bo_gem->reloc_target_info[i].bo != bo) {
1092 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1093 reloc_target_info[i].bo,
1097 bo_gem->reloc_count = 0;
1098 bo_gem->used_as_reloc_target = false;
1100 DBG("bo_unreference final: %d (%s)\n",
1101 bo_gem->gem_handle, bo_gem->name);
1103 /* release memory associated with this object */
1104 if (bo_gem->reloc_target_info) {
1105 free(bo_gem->reloc_target_info);
1106 bo_gem->reloc_target_info = NULL;
1108 if (bo_gem->relocs) {
1109 free(bo_gem->relocs);
1110 bo_gem->relocs = NULL;
1113 /* Clear any left-over mappings */
1114 if (bo_gem->map_count) {
1115 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1116 bo_gem->map_count = 0;
1117 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1118 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1121 DRMLISTDEL(&bo_gem->name_list);
1123 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1124 /* Put the buffer into our internal cache for reuse if we can. */
1125 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1126 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1127 I915_MADV_DONTNEED)) {
1128 bo_gem->free_time = time;
1130 bo_gem->name = NULL;
1131 bo_gem->validate_index = -1;
1133 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1135 drm_intel_gem_bo_free(bo);
1139 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1142 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1144 assert(atomic_read(&bo_gem->refcount) > 0);
1145 if (atomic_dec_and_test(&bo_gem->refcount))
1146 drm_intel_gem_bo_unreference_final(bo, time);
1149 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1151 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1153 assert(atomic_read(&bo_gem->refcount) > 0);
1154 if (atomic_dec_and_test(&bo_gem->refcount)) {
1155 drm_intel_bufmgr_gem *bufmgr_gem =
1156 (drm_intel_bufmgr_gem *) bo->bufmgr;
1157 struct timespec time;
1159 clock_gettime(CLOCK_MONOTONIC, &time);
1161 pthread_mutex_lock(&bufmgr_gem->lock);
1162 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1163 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1164 pthread_mutex_unlock(&bufmgr_gem->lock);
1168 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1170 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1171 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1172 struct drm_i915_gem_set_domain set_domain;
1175 pthread_mutex_lock(&bufmgr_gem->lock);
1177 if (bo_gem->map_count++ == 0)
1178 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1180 if (!bo_gem->mem_virtual) {
1181 struct drm_i915_gem_mmap mmap_arg;
1183 DBG("bo_map: %d (%s), map_count=%d\n",
1184 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1187 mmap_arg.handle = bo_gem->gem_handle;
1188 mmap_arg.offset = 0;
1189 mmap_arg.size = bo->size;
1190 ret = drmIoctl(bufmgr_gem->fd,
1191 DRM_IOCTL_I915_GEM_MMAP,
1195 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1196 __FILE__, __LINE__, bo_gem->gem_handle,
1197 bo_gem->name, strerror(errno));
1198 if (--bo_gem->map_count == 0)
1199 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1200 pthread_mutex_unlock(&bufmgr_gem->lock);
1203 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1204 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1206 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1207 bo_gem->mem_virtual);
1208 bo->virtual = bo_gem->mem_virtual;
1210 VG_CLEAR(set_domain);
1211 set_domain.handle = bo_gem->gem_handle;
1212 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1214 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1216 set_domain.write_domain = 0;
1217 ret = drmIoctl(bufmgr_gem->fd,
1218 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1221 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1222 __FILE__, __LINE__, bo_gem->gem_handle,
1227 bo_gem->mapped_cpu_write = true;
1229 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1230 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1231 pthread_mutex_unlock(&bufmgr_gem->lock);
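/*
 * Illustrative sketch (not compiled): the usual CPU upload path using the
 * map implemented above, via the public drm_intel_bo_map()/drm_intel_bo_unmap()
 * wrappers. The helper name and payload are hypothetical; for small writes,
 * drm_intel_bo_subdata() avoids the mapping entirely.
 */
#if 0
static int example_cpu_upload(drm_intel_bo *bo, const void *data, size_t len)
{
	int ret;

	ret = drm_intel_bo_map(bo, 1 /* write_enable */);
	if (ret != 0)
		return ret;

	memcpy(bo->virtual, data, len);	/* len must not exceed bo->size */
	return drm_intel_bo_unmap(bo);
}
#endif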
1237 map_gtt(drm_intel_bo *bo)
1239 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1240 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1243 if (bo_gem->map_count++ == 0)
1244 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1246 /* Get a mapping of the buffer if we haven't before. */
1247 if (bo_gem->gtt_virtual == NULL) {
1248 struct drm_i915_gem_mmap_gtt mmap_arg;
1250 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1251 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1254 mmap_arg.handle = bo_gem->gem_handle;
1256 /* Get the fake offset back... */
1257 ret = drmIoctl(bufmgr_gem->fd,
1258 DRM_IOCTL_I915_GEM_MMAP_GTT,
1262 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1264 bo_gem->gem_handle, bo_gem->name,
1266 if (--bo_gem->map_count == 0)
1267 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1272 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1273 MAP_SHARED, bufmgr_gem->fd,
1275 if (bo_gem->gtt_virtual == MAP_FAILED) {
1276 bo_gem->gtt_virtual = NULL;
1278 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1280 bo_gem->gem_handle, bo_gem->name,
1282 if (--bo_gem->map_count == 0)
1283 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1288 bo->virtual = bo_gem->gtt_virtual;
1290 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1291 bo_gem->gtt_virtual);
1296 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1298 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1299 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1300 struct drm_i915_gem_set_domain set_domain;
1303 pthread_mutex_lock(&bufmgr_gem->lock);
1307 pthread_mutex_unlock(&bufmgr_gem->lock);
1311 /* Now move it to the GTT domain so that the GPU and CPU
1312 * caches are flushed and the GPU isn't actively using the
1315 * The pagefault handler does this domain change for us when
1316 * it has unbound the BO from the GTT, but it's up to us to
1317 * tell it when we're about to use things if we had done
1318 * rendering and it still happens to be bound to the GTT.
1320 VG_CLEAR(set_domain);
1321 set_domain.handle = bo_gem->gem_handle;
1322 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1323 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1324 ret = drmIoctl(bufmgr_gem->fd,
1325 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1328 DBG("%s:%d: Error setting domain %d: %s\n",
1329 __FILE__, __LINE__, bo_gem->gem_handle,
1333 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1334 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1335 pthread_mutex_unlock(&bufmgr_gem->lock);
1341 * Performs a mapping of the buffer object like the normal GTT
1342 * mapping, but avoids waiting for the GPU to be done reading from or
1343 * rendering to the buffer.
1345 * This is used in the implementation of GL_ARB_map_buffer_range: The
1346 * user asks to create a buffer, then does a mapping, fills some
1347 * space, runs a drawing command, then asks to map it again without
1348 * synchronizing because it guarantees that it won't write over the
1349 * data that the GPU is busy using (or, more specifically, that if it
1350 * does write over the data, it acknowledges that rendering is
1354 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1356 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1357 #ifdef HAVE_VALGRIND
1358 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1362 /* If the CPU cache isn't coherent with the GTT, then use a
1363 * regular synchronized mapping. The problem is that we don't
1364 * track where the buffer was last used on the CPU side in
1365 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1366 * we would potentially corrupt the buffer even when the user
1367 * does reasonable things.
1369 if (!bufmgr_gem->has_llc)
1370 return drm_intel_gem_bo_map_gtt(bo);
1372 pthread_mutex_lock(&bufmgr_gem->lock);
1376 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1377 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1380 pthread_mutex_unlock(&bufmgr_gem->lock);
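/*
 * Illustrative sketch (not compiled): the append-only streaming pattern the
 * comment above describes. The caller promises not to overwrite data the
 * GPU may still be reading, so the unsynchronized map can skip the wait.
 * The helper name and offset handling are hypothetical.
 */
#if 0
static int example_stream_append(drm_intel_bo *bo, unsigned long offset,
				 const void *data, size_t len)
{
	int ret;

	ret = drm_intel_gem_bo_map_unsynchronized(bo);
	if (ret != 0)
		return ret;

	/* Only touch bytes past what previous draws may still be using. */
	memcpy((char *)bo->virtual + offset, data, len);
	return drm_intel_gem_bo_unmap_gtt(bo);
}
#endif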
1385 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1388 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1394 pthread_mutex_lock(&bufmgr_gem->lock);
1396 if (bo_gem->map_count <= 0) {
1397 DBG("attempted to unmap an unmapped bo\n");
1398 pthread_mutex_unlock(&bufmgr_gem->lock);
1399 /* Preserve the old behaviour of just treating this as a
1400 * no-op rather than reporting the error.
1405 if (bo_gem->mapped_cpu_write) {
1406 struct drm_i915_gem_sw_finish sw_finish;
1408 /* Cause a flush to happen if the buffer's pinned for
1409 * scanout, so the results show up in a timely manner.
1410 * Unlike GTT set domains, this only does work if the
1411 * buffer should be scanout-related.
1413 VG_CLEAR(sw_finish);
1414 sw_finish.handle = bo_gem->gem_handle;
1415 ret = drmIoctl(bufmgr_gem->fd,
1416 DRM_IOCTL_I915_GEM_SW_FINISH,
1418 ret = ret == -1 ? -errno : 0;
1420 bo_gem->mapped_cpu_write = false;
1423 /* We need to unmap after every invocation as we cannot track
1424 * an open vma for every bo, as that would exhaust the system
1425 * limits and cause later failures.
1427 if (--bo_gem->map_count == 0) {
1428 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1429 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1432 pthread_mutex_unlock(&bufmgr_gem->lock);
1437 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1439 return drm_intel_gem_bo_unmap(bo);
1443 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1444 unsigned long size, const void *data)
1446 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1447 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1448 struct drm_i915_gem_pwrite pwrite;
1452 pwrite.handle = bo_gem->gem_handle;
1453 pwrite.offset = offset;
1455 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1456 ret = drmIoctl(bufmgr_gem->fd,
1457 DRM_IOCTL_I915_GEM_PWRITE,
1461 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1462 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1463 (int)size, strerror(errno));
1470 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1472 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1473 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1476 VG_CLEAR(get_pipe_from_crtc_id);
1477 get_pipe_from_crtc_id.crtc_id = crtc_id;
1478 ret = drmIoctl(bufmgr_gem->fd,
1479 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1480 &get_pipe_from_crtc_id);
1482 /* We return -1 here to signal that we don't
1483 * know which pipe is associated with this crtc.
1484 * This lets the caller know that this information
1485 * isn't available; using the wrong pipe for
1486 * vblank waiting can cause the chipset to lock up
1491 return get_pipe_from_crtc_id.pipe;
1495 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1496 unsigned long size, void *data)
1498 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1499 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1500 struct drm_i915_gem_pread pread;
1504 pread.handle = bo_gem->gem_handle;
1505 pread.offset = offset;
1507 pread.data_ptr = (uint64_t) (uintptr_t) data;
1508 ret = drmIoctl(bufmgr_gem->fd,
1509 DRM_IOCTL_I915_GEM_PREAD,
1513 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1514 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1515 (int)size, strerror(errno));
1521 /** Waits for all GPU rendering with the object to have completed. */
1523 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1525 drm_intel_gem_bo_start_gtt_access(bo, 1);
1529 * Waits on a BO for the given amount of time.
1531 * @bo: buffer object to wait for
1532 * @timeout_ns: amount of time to wait in nanoseconds.
1533 * If value is less than 0, an infinite wait will occur.
1535 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1536 * object has completed within the allotted time. Otherwise a negative return
1537 * value describes the error. Of particular interest is -ETIME, returned when
1538 * the wait has failed to yield the desired result.
1540 * Similar to drm_intel_gem_bo_wait_rendering, except a timeout parameter allows
1541 * the operation to give up after a certain amount of time. Another subtle
1542 * difference is in the locking semantics: this variant does not hold the lock
1543 * for the duration of the wait, which makes the wait subject to a larger
1544 * userspace race window.
1546 * The implementation shall wait until the object is no longer actively
1547 * referenced within a batch buffer at the time of the call. The wait does not
1548 * guarantee that the buffer will not be re-issued via another thread or a
1549 * flinked handle. Userspace must make sure this race does not occur if such precision
1552 int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1554 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1555 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1556 struct drm_i915_gem_wait wait;
1559 if (!bufmgr_gem->has_wait_timeout) {
1560 DBG("%s:%d: Timed wait is not supported. Falling back to "
1561 "infinite wait\n", __FILE__, __LINE__);
1563 drm_intel_gem_bo_wait_rendering(bo);
1566 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1570 wait.bo_handle = bo_gem->gem_handle;
1571 wait.timeout_ns = timeout_ns;
1573 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
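/*
 * Illustrative sketch (not compiled): polling a buffer with a bounded wait
 * before deciding whether to stall. The helper name is hypothetical; on
 * kernels without the wait ioctl the call above falls back to an infinite
 * wait, as documented.
 */
#if 0
static bool example_bo_idle_within(drm_intel_bo *bo, int64_t budget_ns)
{
	/* Returns 0 once the last batch referencing bo has completed;
	 * -ETIME indicates the budget expired while it was still busy.
	 */
	return drm_intel_gem_bo_wait(bo, budget_ns) == 0;
}
#endif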
1581 * Sets the object to the GTT read and possibly write domain, used by the X
1582 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1584 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1585 * can do tiled pixmaps this way.
1588 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1590 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1591 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1592 struct drm_i915_gem_set_domain set_domain;
1595 VG_CLEAR(set_domain);
1596 set_domain.handle = bo_gem->gem_handle;
1597 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1598 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1599 ret = drmIoctl(bufmgr_gem->fd,
1600 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1603 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1604 __FILE__, __LINE__, bo_gem->gem_handle,
1605 set_domain.read_domains, set_domain.write_domain,
1611 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1613 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1616 free(bufmgr_gem->exec2_objects);
1617 free(bufmgr_gem->exec_objects);
1618 free(bufmgr_gem->exec_bos);
1619 free(bufmgr_gem->aub_filename);
1621 pthread_mutex_destroy(&bufmgr_gem->lock);
1623 /* Free any cached buffer objects we were going to reuse */
1624 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1625 struct drm_intel_gem_bo_bucket *bucket =
1626 &bufmgr_gem->cache_bucket[i];
1627 drm_intel_bo_gem *bo_gem;
1629 while (!DRMLISTEMPTY(&bucket->head)) {
1630 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1631 bucket->head.next, head);
1632 DRMLISTDEL(&bo_gem->head);
1634 drm_intel_gem_bo_free(&bo_gem->bo);
1642 * Adds the target buffer to the validation list and adds the relocation
1643 * to the reloc_buffer's relocation list.
1645 * The relocation entry at the given offset must already contain the
1646 * precomputed relocation value, because the kernel will optimize out
1647 * the relocation entry write when the buffer hasn't moved from the
1648 * last known offset in target_bo.
1651 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1652 drm_intel_bo *target_bo, uint32_t target_offset,
1653 uint32_t read_domains, uint32_t write_domain,
1656 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1657 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1658 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1659 bool fenced_command;
1661 if (bo_gem->has_error)
1664 if (target_bo_gem->has_error) {
1665 bo_gem->has_error = true;
1669 /* We never use HW fences for rendering on 965+ */
1670 if (bufmgr_gem->gen >= 4)
1673 fenced_command = need_fence;
1674 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1677 /* Create a new relocation list if needed */
1678 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1681 /* Check overflow */
1682 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1685 assert(offset <= bo->size - 4);
1686 assert((write_domain & (write_domain - 1)) == 0);
1688 /* Make sure that we're not adding a reloc to something whose size has
1689 * already been accounted for.
1691 assert(!bo_gem->used_as_reloc_target);
1692 if (target_bo_gem != bo_gem) {
1693 target_bo_gem->used_as_reloc_target = true;
1694 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1696 /* An object needing a fence is a tiled buffer, so it won't have
1697 * relocs to other buffers.
1700 target_bo_gem->reloc_tree_fences = 1;
1701 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1703 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1704 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1705 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1706 target_bo_gem->gem_handle;
1707 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1708 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1709 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1711 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1712 if (target_bo != bo)
1713 drm_intel_gem_bo_reference(target_bo);
1715 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1716 DRM_INTEL_RELOC_FENCE;
1718 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1720 bo_gem->reloc_count++;
1726 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1727 drm_intel_bo *target_bo, uint32_t target_offset,
1728 uint32_t read_domains, uint32_t write_domain)
1730 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1732 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1733 read_domains, write_domain,
1734 !bufmgr_gem->fenced_relocs);
1738 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1739 drm_intel_bo *target_bo,
1740 uint32_t target_offset,
1741 uint32_t read_domains, uint32_t write_domain)
1743 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1744 read_domains, write_domain, true);
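/*
 * Illustrative sketch (not compiled): emitting a relocation the way the
 * comment above do_bo_emit_reloc() requires -- the presumed address is
 * written into the batch first, then the reloc entry is recorded so the
 * kernel only patches it if the target moves. The helper, dword index and
 * domain choice are hypothetical.
 */
#if 0
static void example_emit_reloc(drm_intel_bo *batch, uint32_t *batch_map,
			       uint32_t dword, drm_intel_bo *target)
{
	/* Write the current (presumed) GTT address of the target. */
	batch_map[dword] = target->offset;

	/* Record the relocation at that byte offset within the batch. */
	drm_intel_bo_emit_reloc(batch, dword * 4, target, 0,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);
}
#endif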
1748 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1750 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1752 return bo_gem->reloc_count;
1756 * Removes existing relocation entries in the BO after "start".
1758 * This allows a user to avoid a two-step process for state setup, where
1759 * they would otherwise count up all the buffer objects and call
1760 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1761 * relocations for the state setup. Instead, save the state of the
1762 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
1763 * state, and then check if it still fits in the aperture.
1765 * Any further drm_intel_bufmgr_check_aperture_space() queries
1766 * involving this buffer in the tree are undefined after this call.
1769 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1771 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1773 struct timespec time;
1775 clock_gettime(CLOCK_MONOTONIC, &time);
1777 assert(bo_gem->reloc_count >= start);
1778 /* Unreference the cleared target buffers */
1779 for (i = start; i < bo_gem->reloc_count; i++) {
1780 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
1781 if (&target_bo_gem->bo != bo) {
1782 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
1783 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1787 bo_gem->reloc_count = start;
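/*
 * Illustrative sketch (not compiled): the save/emit/check pattern described
 * in the comment above. State emission is elided; the helper and the
 * emit_state() callback are hypothetical.
 */
#if 0
static bool example_emit_state_if_it_fits(drm_intel_bo *batch,
					   void (*emit_state)(drm_intel_bo *))
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch);

	/* Emit the state and its relocations optimistically. */
	emit_state(batch);

	/* If the batch no longer fits, roll the relocations back. */
	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
		drm_intel_gem_bo_clear_relocs(batch, saved);
		return false;	/* caller flushes and retries */
	}
	return true;
}
#endif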
1791 * Walk the tree of relocations rooted at BO and accumulate the list of
1792 * validations to be performed and update the relocation buffers with
1793 * index values into the validation list.
1796 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1798 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1801 if (bo_gem->relocs == NULL)
1804 for (i = 0; i < bo_gem->reloc_count; i++) {
1805 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1807 if (target_bo == bo)
1810 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1812 /* Continue walking the tree depth-first. */
1813 drm_intel_gem_bo_process_reloc(target_bo);
1815 /* Add the target to the validate list */
1816 drm_intel_add_validate_buffer(target_bo);
1821 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1823 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1826 if (bo_gem->relocs == NULL)
1829 for (i = 0; i < bo_gem->reloc_count; i++) {
1830 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1833 if (target_bo == bo)
1836 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1838 /* Continue walking the tree depth-first. */
1839 drm_intel_gem_bo_process_reloc2(target_bo);
1841 need_fence = (bo_gem->reloc_target_info[i].flags &
1842 DRM_INTEL_RELOC_FENCE);
1844 /* Add the target to the validate list */
1845 drm_intel_add_validate_buffer2(target_bo, need_fence);
1851 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1855 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1856 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1857 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1859 /* Update the buffer offset */
1860 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1861 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1862 bo_gem->gem_handle, bo_gem->name, bo->offset,
1863 (unsigned long long)bufmgr_gem->exec_objects[i].
1865 bo->offset = bufmgr_gem->exec_objects[i].offset;
1871 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1875 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1876 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1877 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1879 /* Update the buffer offset */
1880 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1881 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1882 bo_gem->gem_handle, bo_gem->name, bo->offset,
1883 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1884 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1890 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1892 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1896 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1898 fwrite(data, 1, size, bufmgr_gem->aub_file);
1902 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1904 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1905 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1909 data = malloc(bo->size);
1910 drm_intel_bo_get_subdata(bo, offset, size, data);
1912 /* Easy mode: write out bo with no relocations */
1913 if (!bo_gem->reloc_count) {
1914 aub_out_data(bufmgr_gem, data, size);
1919 /* Otherwise, handle the relocations while writing. */
1920 for (i = 0; i < size / 4; i++) {
1922 for (r = 0; r < bo_gem->reloc_count; r++) {
1923 struct drm_i915_gem_relocation_entry *reloc;
1924 drm_intel_reloc_target *info;
1926 reloc = &bo_gem->relocs[r];
1927 info = &bo_gem->reloc_target_info[r];
1929 if (reloc->offset == offset + i * 4) {
1930 drm_intel_bo_gem *target_gem;
1933 target_gem = (drm_intel_bo_gem *)info->bo;
1936 val += target_gem->aub_offset;
1938 aub_out(bufmgr_gem, val);
1943 if (r == bo_gem->reloc_count) {
1944 /* no relocation, just the data */
1945 aub_out(bufmgr_gem, data[i]);
1953 aub_bo_get_address(drm_intel_bo *bo)
1955 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1956 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1958 /* Give the object a graphics address in the AUB file. We
1959 * don't just use the GEM object address because we do AUB
1960 * dumping before execution -- we want to successfully log
1961 * when the hardware might hang, and we might even want to aub
1962 * capture for a driver trying to execute on a different
1963 * generation of hardware by disabling the actual kernel exec
1966 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1967 bufmgr_gem->aub_offset += bo->size;
1968 /* XXX: Handle aperture overflow. */
1969 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1973 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1974 uint32_t offset, uint32_t size)
1976 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1977 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1980 CMD_AUB_TRACE_HEADER_BLOCK |
1981 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
1983 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1984 aub_out(bufmgr_gem, subtype);
1985 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1986 aub_out(bufmgr_gem, size);
1987 if (bufmgr_gem->gen >= 8)
1988 aub_out(bufmgr_gem, 0);
1989 aub_write_bo_data(bo, offset, size);
1993 * Break up large objects into multiple writes. Otherwise a 128kb VBO
1994 * would overflow the 16-bit size field in the packet header and
1995 * everything goes badly after that.
1998 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1999 uint32_t offset, uint32_t size)
2001 uint32_t block_size;
2002 uint32_t sub_offset;
2004 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
2005 block_size = size - sub_offset;
2007 if (block_size > 8 * 4096)
2008 block_size = 8 * 4096;
2010 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
2016 aub_write_bo(drm_intel_bo *bo)
2018 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2019 uint32_t offset = 0;
2022 aub_bo_get_address(bo);
2024 /* Write out each annotated section separately. */
2025 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
2026 drm_intel_aub_annotation *annotation =
2027 &bo_gem->aub_annotations[i];
2028 uint32_t ending_offset = annotation->ending_offset;
2029 if (ending_offset > bo->size)
2030 ending_offset = bo->size;
2031 if (ending_offset > offset) {
2032 aub_write_large_trace_block(bo, annotation->type,
2033 annotation->subtype,
2035 ending_offset - offset);
2036 offset = ending_offset;
2040 /* Write out any remaining unannotated data */
2041 if (offset < bo->size) {
2042 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
2043 offset, bo->size - offset);
2048 * Make a ring buffer on the fly and dump it
2051 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2052 uint32_t batch_buffer, int ring_flag)
2054 uint32_t ringbuffer[4096];
2055 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2058 if (ring_flag == I915_EXEC_BSD)
2059 ring = AUB_TRACE_TYPE_RING_PRB1;
2060 else if (ring_flag == I915_EXEC_BLT)
2061 ring = AUB_TRACE_TYPE_RING_PRB2;
2063 /* Make a ring buffer to execute our batchbuffer. */
2064 memset(ringbuffer, 0, sizeof(ringbuffer));
2065 if (bufmgr_gem->gen >= 8) {
2066 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
2067 ringbuffer[ring_count++] = batch_buffer;
2068 ringbuffer[ring_count++] = 0;
2070 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2071 ringbuffer[ring_count++] = batch_buffer;
2074 /* Write out the ring. This appears to trigger execution of
2075 * the ring in the simulator.
2078 CMD_AUB_TRACE_HEADER_BLOCK |
2079 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
2081 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2082 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2083 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2084 aub_out(bufmgr_gem, ring_count * 4);
2085 if (bufmgr_gem->gen >= 8)
2086 aub_out(bufmgr_gem, 0);
2088 /* FIXME: Need some flush operations here? */
2089 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2091 /* Update offset pointer */
2092 bufmgr_gem->aub_offset += 4096;
2096 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2097 int x1, int y1, int width, int height,
2098 enum aub_dump_bmp_format format,
2099 int pitch, int offset)
2101 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2102 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2106 case AUB_DUMP_BMP_FORMAT_8BIT:
2109 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2112 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2113 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2117 printf("Unknown AUB dump format %d\n", format);
2121 if (!bufmgr_gem->aub_file)
2124 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2125 aub_out(bufmgr_gem, (y1 << 16) | x1);
2130 aub_out(bufmgr_gem, (height << 16) | width);
2131 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2133 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2134 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2138 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2140 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2141 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2143 bool batch_buffer_needs_annotations;
2145 if (!bufmgr_gem->aub_file)
2148 /* If the batch buffer is not annotated, annotate it the best we
2151 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2152 if (batch_buffer_needs_annotations) {
2153 drm_intel_aub_annotation annotations[2] = {
2154 { AUB_TRACE_TYPE_BATCH, 0, used },
2155 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2157 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2160 /* Write out all buffers to AUB memory */
2161 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2162 aub_write_bo(bufmgr_gem->exec_bos[i]);
2165 /* Remove any annotations we added */
2166 if (batch_buffer_needs_annotations)
2167 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2169 /* Dump ring buffer */
2170 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2172 fflush(bufmgr_gem->aub_file);
2175 * One frame has been dumped. So reset the aub_offset for the next frame.
2177 * FIXME: Can we do this?
2179 bufmgr_gem->aub_offset = 0x10000;
2183 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2184 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2186 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2187 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2188 struct drm_i915_gem_execbuffer execbuf;
2191 if (bo_gem->has_error)
2194 pthread_mutex_lock(&bufmgr_gem->lock);
2195 /* Update indices and set up the validate list. */
2196 drm_intel_gem_bo_process_reloc(bo);
2198 /* Add the batch buffer to the validation list. There are no
2199 * relocations pointing to it.
2201 drm_intel_add_validate_buffer(bo);
2204 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2205 execbuf.buffer_count = bufmgr_gem->exec_count;
2206 execbuf.batch_start_offset = 0;
2207 execbuf.batch_len = used;
2208 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2209 execbuf.num_cliprects = num_cliprects;
2213 ret = drmIoctl(bufmgr_gem->fd,
2214 DRM_IOCTL_I915_GEM_EXECBUFFER,
2218 if (errno == ENOSPC) {
2219 DBG("Execbuffer fails to pin. "
2220 "Estimate: %u. Actual: %u. Available: %u\n",
2221 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2224 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2227 (unsigned int)bufmgr_gem->gtt_size);
2230 drm_intel_update_buffer_offsets(bufmgr_gem);
2232 if (bufmgr_gem->bufmgr.debug)
2233 drm_intel_gem_dump_validation_list(bufmgr_gem);
2235 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2236 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2237 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2239 bo_gem->idle = false;
2241 /* Disconnect the buffer from the validate list */
2242 bo_gem->validate_index = -1;
2243 bufmgr_gem->exec_bos[i] = NULL;
2245 bufmgr_gem->exec_count = 0;
2246 pthread_mutex_unlock(&bufmgr_gem->lock);
2252 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2253 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2256 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2257 struct drm_i915_gem_execbuffer2 execbuf;
2261 switch (flags & 0x7) {
2265 if (!bufmgr_gem->has_blt)
2269 if (!bufmgr_gem->has_bsd)
2272 case I915_EXEC_VEBOX:
2273 if (!bufmgr_gem->has_vebox)
2276 case I915_EXEC_RENDER:
2277 case I915_EXEC_DEFAULT:
2281 pthread_mutex_lock(&bufmgr_gem->lock);
2282 /* Update indices and set up the validate list. */
2283 drm_intel_gem_bo_process_reloc2(bo);
	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	 */
2288 drm_intel_add_validate_buffer2(bo, 0);
2291 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2292 execbuf.buffer_count = bufmgr_gem->exec_count;
2293 execbuf.batch_start_offset = 0;
2294 execbuf.batch_len = used;
2295 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2296 execbuf.num_cliprects = num_cliprects;
2299 execbuf.flags = flags;
	if (ctx == NULL)
		i915_execbuffer2_set_context_id(execbuf, 0);
	else
		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2306 aub_exec(bo, flags, used);
2308 if (bufmgr_gem->no_exec)
2309 goto skip_execution;
2311 ret = drmIoctl(bufmgr_gem->fd,
2312 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2316 if (ret == -ENOSPC) {
2317 DBG("Execbuffer fails to pin. "
2318 "Estimate: %u. Actual: %u. Available: %u\n",
2319 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2320 bufmgr_gem->exec_count),
2321 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2322 bufmgr_gem->exec_count),
2323 (unsigned int) bufmgr_gem->gtt_size);
2326 drm_intel_update_buffer_offsets2(bufmgr_gem);
2329 if (bufmgr_gem->bufmgr.debug)
2330 drm_intel_gem_dump_validation_list(bufmgr_gem);
2332 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2333 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2334 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2336 bo_gem->idle = false;
2338 /* Disconnect the buffer from the validate list */
2339 bo_gem->validate_index = -1;
2340 bufmgr_gem->exec_bos[i] = NULL;
2342 bufmgr_gem->exec_count = 0;
2343 pthread_mutex_unlock(&bufmgr_gem->lock);
2349 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2350 drm_clip_rect_t *cliprects, int num_cliprects,
2353 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2358 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2359 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2362 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2367 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2368 int used, unsigned int flags)
2370 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
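
/* Illustrative sketch (not part of the original file): how a caller might
 * submit a batch buffer through the exec2 wrappers above, either picking a
 * ring explicitly via the exec flags or running inside a hardware context.
 * "batch_bo" and "batch_used" are hypothetical caller-side names.
 */
static int submit_batch_example(drm_intel_bufmgr *bufmgr,
				drm_intel_bo *batch_bo, int batch_used)
{
	drm_intel_context *ctx;
	int ret;

	/* Without a context, select the ring through the flags argument. */
	ret = drm_intel_gem_bo_mrb_exec2(batch_bo, batch_used,
					 NULL, 0, 0, I915_EXEC_BLT);
	if (ret != 0)
		return ret;

	/* With a hardware context, relocation processing and ring selection
	 * work the same way; the context just isolates GPU state.
	 */
	ctx = drm_intel_gem_context_create(bufmgr);
	if (ctx == NULL)
		return -ENOMEM;
	ret = drm_intel_gem_bo_context_exec(batch_bo, ctx, batch_used,
					    I915_EXEC_RENDER);
	drm_intel_gem_context_destroy(ctx);
	return ret;
}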
2374 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2376 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2377 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2378 struct drm_i915_gem_pin pin;
2382 pin.handle = bo_gem->gem_handle;
2383 pin.alignment = alignment;
2385 ret = drmIoctl(bufmgr_gem->fd,
2386 DRM_IOCTL_I915_GEM_PIN,
2391 bo->offset = pin.offset;
2396 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2398 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2399 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2400 struct drm_i915_gem_unpin unpin;
2404 unpin.handle = bo_gem->gem_handle;
2406 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2414 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2415 uint32_t tiling_mode,
2418 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2419 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2420 struct drm_i915_gem_set_tiling set_tiling;
2423 if (bo_gem->global_name == 0 &&
2424 tiling_mode == bo_gem->tiling_mode &&
2425 stride == bo_gem->stride)
2428 memset(&set_tiling, 0, sizeof(set_tiling));
2430 /* set_tiling is slightly broken and overwrites the
2431 * input on the error path, so we have to open code
2434 set_tiling.handle = bo_gem->gem_handle;
2435 set_tiling.tiling_mode = tiling_mode;
2436 set_tiling.stride = stride;
2438 ret = ioctl(bufmgr_gem->fd,
2439 DRM_IOCTL_I915_GEM_SET_TILING,
2441 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2445 bo_gem->tiling_mode = set_tiling.tiling_mode;
2446 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2447 bo_gem->stride = set_tiling.stride;
2452 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2455 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2456 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2459 /* Linear buffers have no stride. By ensuring that we only ever use
2460 * stride 0 with linear buffers, we simplify our code.
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;
2465 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2467 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2469 *tiling_mode = bo_gem->tiling_mode;
2474 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2475 uint32_t * swizzle_mode)
2477 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2479 *tiling_mode = bo_gem->tiling_mode;
2480 *swizzle_mode = bo_gem->swizzle_mode;
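
/* Illustrative sketch (not part of the original file): typical use of the
 * tiling interface.  The requested mode is passed by pointer because the
 * kernel may refuse or downgrade the request, in which case *tiling_mode is
 * updated to what the buffer actually ended up with.  The names below
 * ("scanout_bo", the 4096-byte pitch) are hypothetical.
 */
static void tiling_usage_example(drm_intel_bo *scanout_bo)
{
	uint32_t tiling = I915_TILING_X;
	uint32_t swizzle;

	/* Ask for X tiling with a 4096-byte stride, then check what we got. */
	if (drm_intel_bo_set_tiling(scanout_bo, &tiling, 4096) != 0 ||
	    tiling != I915_TILING_X) {
		/* Kernel refused or changed the request; treat the buffer as
		 * linear from here on.
		 */
	}

	/* Query the current tiling and swizzle mode back. */
	drm_intel_bo_get_tiling(scanout_bo, &tiling, &swizzle);
}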
2485 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2487 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2490 drm_intel_bo_gem *bo_gem;
2491 struct drm_i915_gem_get_tiling get_tiling;
2492 drmMMListHead *list;
2494 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2497 * See if the kernel has already returned this buffer to us. Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object.
	 */
2501 for (list = bufmgr_gem->named.next;
2502 list != &bufmgr_gem->named;
2503 list = list->next) {
2504 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
2505 if (bo_gem->gem_handle == handle) {
2506 drm_intel_gem_bo_reference(&bo_gem->bo);
		fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n",
			ret, errno);
2516 bo_gem = calloc(1, sizeof(*bo_gem));
2520 /* Determine size of bo. The fd-to-handle ioctl really should
2521 * return the size, but it doesn't. If we have kernel 3.12 or
2522 * later, we can lseek on the prime fd to get the size. Older
2523 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
2525 ret = lseek(prime_fd, 0, SEEK_END);
2527 bo_gem->bo.size = ret;
2529 bo_gem->bo.size = size;
2531 bo_gem->bo.handle = handle;
2532 bo_gem->bo.bufmgr = bufmgr;
2534 bo_gem->gem_handle = handle;
2536 atomic_set(&bo_gem->refcount, 1);
2538 bo_gem->name = "prime";
2539 bo_gem->validate_index = -1;
2540 bo_gem->reloc_tree_fences = 0;
2541 bo_gem->used_as_reloc_target = false;
2542 bo_gem->has_error = false;
2543 bo_gem->reusable = false;
2545 DRMINITLISTHEAD(&bo_gem->vma_list);
2546 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2548 VG_CLEAR(get_tiling);
2549 get_tiling.handle = bo_gem->gem_handle;
2550 ret = drmIoctl(bufmgr_gem->fd,
2551 DRM_IOCTL_I915_GEM_GET_TILING,
2554 drm_intel_gem_bo_unreference(&bo_gem->bo);
2557 bo_gem->tiling_mode = get_tiling.tiling_mode;
2558 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2559 /* XXX stride is unknown */
2560 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2566 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2568 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2569 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2571 if (DRMLISTEMPTY(&bo_gem->name_list))
2572 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2574 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2575 DRM_CLOEXEC, prime_fd) != 0)
2578 bo_gem->reusable = false;
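
/* Illustrative sketch (not part of the original file): sharing a buffer
 * between two DRM clients with PRIME.  The exporting side turns a bo into a
 * dma-buf file descriptor; the importing side (possibly another process,
 * after receiving the fd over a socket) wraps it back into a bo.  Names are
 * hypothetical.
 */
static drm_intel_bo *prime_share_example(drm_intel_bo *src_bo,
					 drm_intel_bufmgr *import_bufmgr)
{
	int fd = -1;

	if (drm_intel_bo_gem_export_to_prime(src_bo, &fd) != 0)
		return NULL;

	/* The size argument is only a fallback for kernels where the prime fd
	 * cannot be lseek()ed to discover the real object size.
	 */
	return drm_intel_bo_gem_create_from_prime(import_bufmgr, fd,
						  src_bo->size);
}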
2584 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2586 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2587 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2590 if (!bo_gem->global_name) {
2591 struct drm_gem_flink flink;
2594 flink.handle = bo_gem->gem_handle;
2596 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2600 bo_gem->global_name = flink.name;
2601 bo_gem->reusable = false;
2603 if (DRMLISTEMPTY(&bo_gem->name_list))
2604 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2607 *name = bo_gem->global_name;
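
/* Illustrative sketch (not part of the original file): legacy buffer sharing
 * by flink name.  The exporter publishes a global name; any other client on
 * the same device can then open the object with
 * drm_intel_bo_gem_create_from_name().  Names below are hypothetical.
 */
static drm_intel_bo *flink_share_example(drm_intel_bo *src_bo,
					 drm_intel_bufmgr *other_bufmgr)
{
	uint32_t name;

	if (drm_intel_bo_flink(src_bo, &name) != 0)
		return NULL;

	/* Unlike PRIME fds, flink names are global and unauthenticated, so
	 * they should only be shared with trusted clients.
	 */
	return drm_intel_bo_gem_create_from_name(other_bufmgr, "shared", name);
}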
2612 * Enables unlimited caching of buffer objects for reuse.
2614 * This is potentially very memory expensive, as the cache at each bucket
2615 * size is only bounded by how many buffers of that size we've managed to have
2616 * in flight at once.
2619 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2621 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2623 bufmgr_gem->bo_reuse = true;
2627 * Enable use of fenced reloc type.
2629 * New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
2634 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2636 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2638 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2639 bufmgr_gem->fenced_relocs = true;
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
2647 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2649 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2653 if (bo == NULL || bo_gem->included_in_check_aperture)
2657 bo_gem->included_in_check_aperture = true;
2659 for (i = 0; i < bo_gem->reloc_count; i++)
		total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_info[i].bo);
2668 * Count the number of buffers in this list that need a fence reg
2670 * If the count is greater than the number of available regs, we'll have
2671 * to ask the caller to resubmit a batch with fewer tiled buffers.
2673 * This function over-counts if the same buffer is used multiple times.
2676 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2679 unsigned int total = 0;
2681 for (i = 0; i < count; i++) {
2682 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2687 total += bo_gem->reloc_tree_fences;
2693 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2694 * for the next drm_intel_bufmgr_check_aperture_space() call.
2697 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2699 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2702 if (bo == NULL || !bo_gem->included_in_check_aperture)
2705 bo_gem->included_in_check_aperture = false;
2707 for (i = 0; i < bo_gem->reloc_count; i++)
2708 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2709 reloc_target_info[i].bo);
2713 * Return a conservative estimate for the amount of aperture required
2714 * for a collection of buffers. This may double-count some buffers.
2717 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2720 unsigned int total = 0;
2722 for (i = 0; i < count; i++) {
2723 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2725 total += bo_gem->reloc_tree_size;
2731 * Return the amount of aperture needed for a collection of buffers.
2732 * This avoids double counting any buffers, at the cost of looking
2733 * at every buffer in the set.
2736 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2739 unsigned int total = 0;
2741 for (i = 0; i < count; i++) {
2742 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2743 /* For the first buffer object in the array, we get an
2744 * accurate count back for its reloc_tree size (since nothing
2745 * had been flagged as being counted yet). We can save that
2746 * value out as a more conservative reloc_tree_size that
2747 * avoids double-counting target buffers. Since the first
2748 * buffer happens to usually be the batch buffer in our
2749 * callers, this can pull us back from doing the tree
2750 * walk on every new batch emit.
2753 drm_intel_bo_gem *bo_gem =
2754 (drm_intel_bo_gem *) bo_array[i];
2755 bo_gem->reloc_tree_size = total;
2759 for (i = 0; i < count; i++)
2760 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2765 * Return -1 if the batchbuffer should be flushed before attempting to
2766 * emit rendering referencing the buffers pointed to by bo_array.
2768 * This is required because if we try to emit a batchbuffer with relocations
2769 * to a tree of buffers that won't simultaneously fit in the aperture,
2770 * the rendering will return an error at a point where the software is not
2771 * prepared to recover from it.
2773 * However, we also want to emit the batchbuffer significantly before we reach
2774 * the limit, as a series of batchbuffers each of which references buffers
2775 * covering almost all of the aperture means that at each emit we end up
2776 * waiting to evict a buffer from the last rendering, and we get synchronous
2777 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2778 * get better parallelism.
2781 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2783 drm_intel_bufmgr_gem *bufmgr_gem =
2784 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2785 unsigned int total = 0;
2786 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2789 /* Check for fence reg constraints if necessary */
2790 if (bufmgr_gem->available_fences) {
2791 total_fences = drm_intel_gem_total_fences(bo_array, count);
2792 if (total_fences > bufmgr_gem->available_fences)
2796 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2798 if (total > threshold)
2799 total = drm_intel_gem_compute_batch_space(bo_array, count);
2801 if (total > threshold) {
2802 DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
	DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
2808 (int)bufmgr_gem->gtt_size / 1024);
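
/* Illustrative sketch (not part of the original file): how a batchbuffer
 * writer typically uses the aperture check.  Before emitting a draw call
 * that references a set of buffers, it verifies the whole working set still
 * fits; a non-zero result means the caller should submit the current batch
 * and re-emit against an empty one.
 */
static int need_batch_flush_example(drm_intel_bo **bos, int count)
{
	/* Returns non-zero when the buffers will not all fit in the
	 * aperture at once.
	 */
	return drm_intel_bufmgr_check_aperture_space(bos, count) != 0;
}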
2814 * Disable buffer reuse for objects which are shared with the kernel
2815 * as scanout buffers
2818 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2820 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2822 bo_gem->reusable = false;
2827 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2829 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2831 return bo_gem->reusable;
2835 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2837 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return true;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo, target_bo))
			return true;
	}
2853 /** Return true if target_bo is referenced by bo's relocation tree. */
2855 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2857 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2859 if (bo == NULL || target_bo == NULL)
2861 if (target_bo_gem->used_as_reloc_target)
2862 return _drm_intel_gem_bo_references(bo, target_bo);
2867 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2869 unsigned int i = bufmgr_gem->num_buckets;
2871 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2873 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2874 bufmgr_gem->cache_bucket[i].size = size;
2875 bufmgr_gem->num_buckets++;
2879 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2881 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2883 /* OK, so power of two buckets was too wasteful of memory.
2884 * Give 3 other sizes between each power of two, to hopefully
2885 * cover things accurately enough. (The alternative is
2886 * probably to just go for exact matching of sizes, and assume
2887 * that for things like composited window resize the tiled
2888 * width/height alignment and rounding of sizes to pages will
2889 * get us useful cache hit rates anyway)
2891 add_bucket(bufmgr_gem, 4096);
2892 add_bucket(bufmgr_gem, 4096 * 2);
2893 add_bucket(bufmgr_gem, 4096 * 3);
2895 /* Initialize the linked lists for BO reuse cache. */
2896 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2897 add_bucket(bufmgr_gem, size);
2899 add_bucket(bufmgr_gem, size + size * 1 / 4);
2900 add_bucket(bufmgr_gem, size + size * 2 / 4);
2901 add_bucket(bufmgr_gem, size + size * 3 / 4);
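
	/* For illustration (not part of the original file): with the rules
	 * above the bucket sizes grow as
	 *   4KB, 8KB, 12KB, 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB,
	 *   64KB, 80KB, ... up to 64MB,
	 * i.e. every power of two plus three evenly spaced sizes in between,
	 * so a cached buffer is reused for requests within roughly 25% of
	 * its size.
	 */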
2906 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2908 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2910 bufmgr_gem->vma_max = limit;
2912 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2916 * Get the PCI ID for the device. This can be overridden by setting the
2917 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2920 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2922 char *devid_override;
2925 drm_i915_getparam_t gp;
2927 if (geteuid() == getuid()) {
2928 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2929 if (devid_override) {
2930 bufmgr_gem->no_exec = true;
			/* Accept decimal or hex (0x...) override values. */
			return strtol(devid_override, NULL, 0);
2937 gp.param = I915_PARAM_CHIPSET_ID;
2939 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2941 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2942 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2948 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2950 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2952 return bufmgr_gem->pci_device;
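
/* For illustration (not part of the original file): the PCI ID reported here
 * normally comes from the kernel, but for testing it can be forced from the
 * environment, e.g.
 *
 *   INTEL_DEVID_OVERRIDE=0x0166 ./some_test
 *
 * (hypothetical value and command).  When the override is in effect, actual
 * execbuffer submission is disabled (no_exec is set above).
 */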
2956 * Sets the AUB filename.
2958 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
2959 * for it to have any effect.
2962 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
2963 const char *filename)
2965 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2967 free(bufmgr_gem->aub_filename);
2969 bufmgr_gem->aub_filename = strdup(filename);
2973 * Sets up AUB dumping.
2975 * This is a trace file format that can be used with the simulator.
2976 * Packets are emitted in a format somewhat like GPU command packets.
2977 * You can set up a GTT and upload your objects into the referenced
2978 * space, then send off batchbuffers and get BMPs out the other end.
2981 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2983 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2984 int entry = 0x200003;
2986 int gtt_size = 0x10000;
2987 const char *filename;
2990 if (bufmgr_gem->aub_file) {
2991 fclose(bufmgr_gem->aub_file);
2992 bufmgr_gem->aub_file = NULL;
2997 if (geteuid() != getuid())
3000 if (bufmgr_gem->aub_filename)
3001 filename = bufmgr_gem->aub_filename;
3003 filename = "intel.aub";
3004 bufmgr_gem->aub_file = fopen(filename, "w+");
3005 if (!bufmgr_gem->aub_file)
3008 /* Start allocating objects from just after the GTT. */
3009 bufmgr_gem->aub_offset = gtt_size;
3011 /* Start with a (required) version packet. */
3012 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
3014 (4 << AUB_HEADER_MAJOR_SHIFT) |
3015 (0 << AUB_HEADER_MINOR_SHIFT));
3016 for (i = 0; i < 8; i++) {
3017 aub_out(bufmgr_gem, 0); /* app name */
3019 aub_out(bufmgr_gem, 0); /* timestamp */
3020 aub_out(bufmgr_gem, 0); /* timestamp */
3021 aub_out(bufmgr_gem, 0); /* comment len */
3023 /* Set up the GTT. The max we can handle is 256M */
3024 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
3025 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
3026 aub_out(bufmgr_gem, 0); /* subtype */
3027 aub_out(bufmgr_gem, 0); /* offset */
3028 aub_out(bufmgr_gem, gtt_size); /* size */
3029 if (bufmgr_gem->gen >= 8)
3030 aub_out(bufmgr_gem, 0);
3031 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
3032 aub_out(bufmgr_gem, entry);
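
/* Illustrative sketch (not part of the original file): how an application
 * typically enables AUB capture.  The filename must be set before dumping is
 * enabled; enabling with a zero argument later closes the file.  The path
 * below is hypothetical.
 */
static void aub_dump_setup_example(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "/tmp/trace.aub");
	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);

	/* ... allocate buffers and submit batches as usual; each exec is also
	 * written to the AUB file ...
	 */

	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
}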
3037 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3039 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3040 struct drm_i915_gem_context_create create;
3041 drm_intel_context *context = NULL;
3044 context = calloc(1, sizeof(*context));
3049 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
3057 context->ctx_id = create.ctx_id;
3058 context->bufmgr = bufmgr;
3064 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3066 drm_intel_bufmgr_gem *bufmgr_gem;
3067 struct drm_i915_gem_context_destroy destroy;
3075 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3076 destroy.ctx_id = ctx->ctx_id;
3077 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));
3087 drm_intel_get_reset_stats(drm_intel_context *ctx,
3088 uint32_t *reset_count,
3092 drm_intel_bufmgr_gem *bufmgr_gem;
3093 struct drm_i915_reset_stats stats;
3099 memset(&stats, 0, sizeof(stats));
3101 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3102 stats.ctx_id = ctx->ctx_id;
3103 ret = drmIoctl(bufmgr_gem->fd,
3104 DRM_IOCTL_I915_GET_RESET_STATS,
3107 if (reset_count != NULL)
3108 *reset_count = stats.reset_count;
3111 *active = stats.batch_active;
3113 if (pending != NULL)
3114 *pending = stats.batch_pending;
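
/* Illustrative sketch (not part of the original file): polling reset
 * statistics to detect GPU hangs involving a given context.  The helper name
 * is hypothetical.
 */
static bool context_hang_check_example(drm_intel_context *ctx)
{
	uint32_t reset_count = 0, active = 0, pending = 0;

	if (drm_intel_get_reset_stats(ctx, &reset_count, &active, &pending))
		return false;

	/* batch_active counts our batches that were executing when a reset
	 * happened (i.e. likely guilty); batch_pending counts ones that were
	 * queued behind a hang and never ran.
	 */
	return active > 0 || pending > 0;
}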
3121 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3125 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3126 struct drm_i915_reg_read reg_read;
3130 reg_read.offset = offset;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
3134 *result = reg_read.val;
3140 * Annotate the given bo for use in aub dumping.
3142 * \param annotations is an array of drm_intel_aub_annotation objects
3143 * describing the type of data in various sections of the bo. Each
3144 * element of the array specifies the type and subtype of a section of
3145 * the bo, and the past-the-end offset of that section. The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
3149 * \param count is the number of elements in the \c annotations array.
3150 * If \c count is zero, then \c annotations will not be dereferenced.
 * Annotations are copied into a private data structure, so the caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
3156 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 */
3161 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3162 drm_intel_aub_annotation *annotations,
3165 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3166 unsigned size = sizeof(*annotations) * count;
3167 drm_intel_aub_annotation *new_annotations =
3168 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
3169 if (new_annotations == NULL) {
3170 free(bo_gem->aub_annotations);
3171 bo_gem->aub_annotations = NULL;
3172 bo_gem->aub_annotation_count = 0;
3175 memcpy(new_annotations, annotations, size);
3176 bo_gem->aub_annotations = new_annotations;
3177 bo_gem->aub_annotation_count = count;
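
/* Illustrative sketch (not part of the original file): annotating a batch
 * buffer by hand before submission, mirroring the default two-entry
 * annotation that aub_exec() installs when none is present.  "batch_bo" and
 * "used" are hypothetical caller-side names.
 */
static void annotate_batch_example(drm_intel_bo *batch_bo, int used)
{
	drm_intel_aub_annotation annotations[2] = {
		/* Commands up to "used" bytes are the batch proper... */
		{ AUB_TRACE_TYPE_BATCH, 0, used },
		/* ...and the rest of the bo is untyped data. */
		{ AUB_TRACE_TYPE_NOTYPE, 0, batch_bo->size }
	};

	drm_intel_bufmgr_gem_set_aub_annotations(batch_bo, annotations, 2);
}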
3181 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
3184 * \param fd File descriptor of the opened DRM device.
3187 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3189 drm_intel_bufmgr_gem *bufmgr_gem;
3190 struct drm_i915_gem_get_aperture aperture;
3191 drm_i915_getparam_t gp;
3195 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3196 if (bufmgr_gem == NULL)
3199 bufmgr_gem->fd = fd;
3201 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3206 ret = drmIoctl(bufmgr_gem->fd,
3207 DRM_IOCTL_I915_GEM_GET_APERTURE,
3211 bufmgr_gem->gtt_size = aperture.aper_available_size;
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
3215 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
3222 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3224 if (IS_GEN2(bufmgr_gem->pci_device))
3225 bufmgr_gem->gen = 2;
3226 else if (IS_GEN3(bufmgr_gem->pci_device))
3227 bufmgr_gem->gen = 3;
3228 else if (IS_GEN4(bufmgr_gem->pci_device))
3229 bufmgr_gem->gen = 4;
3230 else if (IS_GEN5(bufmgr_gem->pci_device))
3231 bufmgr_gem->gen = 5;
3232 else if (IS_GEN6(bufmgr_gem->pci_device))
3233 bufmgr_gem->gen = 6;
3234 else if (IS_GEN7(bufmgr_gem->pci_device))
3235 bufmgr_gem->gen = 7;
3236 else if (IS_GEN8(bufmgr_gem->pci_device))
3237 bufmgr_gem->gen = 8;
3243 if (IS_GEN3(bufmgr_gem->pci_device) &&
3244 bufmgr_gem->gtt_size > 256*1024*1024) {
3245 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3246 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
3248 * gen3 devices) if the kernel advertises it. */
3249 bufmgr_gem->gtt_size -= 256*1024*1024;
3255 gp.param = I915_PARAM_HAS_EXECBUF2;
3256 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3260 gp.param = I915_PARAM_HAS_BSD;
3261 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3262 bufmgr_gem->has_bsd = ret == 0;
3264 gp.param = I915_PARAM_HAS_BLT;
3265 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3266 bufmgr_gem->has_blt = ret == 0;
3268 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3269 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3270 bufmgr_gem->has_relaxed_fencing = ret == 0;
3272 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3273 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3274 bufmgr_gem->has_wait_timeout = ret == 0;
3276 gp.param = I915_PARAM_HAS_LLC;
3277 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		/* Kernel does not support the HAS_LLC query; fall back to GPU
		 * generation detection and assume that we have LLC on GEN6/GEN7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
				       IS_GEN7(bufmgr_gem->pci_device));
3285 bufmgr_gem->has_llc = *gp.value;
3287 gp.param = I915_PARAM_HAS_VEBOX;
3288 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3291 if (bufmgr_gem->gen < 4) {
3292 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3293 gp.value = &bufmgr_gem->available_fences;
3294 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3296 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3298 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3300 bufmgr_gem->available_fences = 0;
3302 /* XXX The kernel reports the total number of fences,
3303 * including any that may be pinned.
3305 * We presume that there will be at least one pinned
3306 * fence for the scanout buffer, but there may be more
3307 * than one scanout and the user may be manually
3308 * pinning buffers. Let's move to execbuffer2 and
3309 * thereby forget the insanity of using fences...
3311 bufmgr_gem->available_fences -= 2;
3312 if (bufmgr_gem->available_fences < 0)
3313 bufmgr_gem->available_fences = 0;
	/* Let's go with one relocation for every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
3323 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3325 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3326 bufmgr_gem->bufmgr.bo_alloc_for_render =
3327 drm_intel_gem_bo_alloc_for_render;
3328 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3329 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3330 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3331 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3332 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3333 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3334 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3335 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3336 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3337 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3338 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3339 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3340 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3341 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3342 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3343 /* Use the new one if available */
3345 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3346 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3348 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3349 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3350 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3351 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
3352 bufmgr_gem->bufmgr.debug = 0;
3353 bufmgr_gem->bufmgr.check_aperture_space =
3354 drm_intel_gem_check_aperture_space;
3355 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3356 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3357 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3358 drm_intel_gem_get_pipe_from_crtc_id;
3359 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3361 DRMINITLISTHEAD(&bufmgr_gem->named);
3362 init_cache_buckets(bufmgr_gem);
3364 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3365 bufmgr_gem->vma_max = -1; /* unlimited by default */
3367 return &bufmgr_gem->bufmgr;
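
/* Illustrative sketch (not part of the original file): typical bring-up of
 * the GEM buffer manager by a client.  "fd" is an already-open DRM device
 * node (e.g. /dev/dri/card0); the 16KB batch size is a hypothetical but
 * common choice and only influences the relocation array sizing above.
 */
static drm_intel_bufmgr *bufmgr_init_example(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

	if (bufmgr == NULL)
		return NULL;

	/* Opt in to the bo cache and to fence-aware relocations. */
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);

	return bufmgr;
}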