1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
52 #include <sys/types.h>
57 #define ETIME ETIMEDOUT
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
64 #include "intel_aub.h"
77 #define memclear(s) memset(&s, 0, sizeof(s))
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
85 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
87 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
89 struct drm_intel_gem_bo_bucket {
94 typedef struct _drm_intel_bufmgr_gem {
95 drm_intel_bufmgr bufmgr;
103 pthread_mutex_t lock;
105 struct drm_i915_gem_exec_object *exec_objects;
106 struct drm_i915_gem_exec_object2 *exec2_objects;
107 drm_intel_bo **exec_bos;
111 /** Array of lists of cached gem objects of power-of-two sizes */
112 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
116 drmMMListHead managers;
119 drmMMListHead vma_cache;
120 int vma_count, vma_open, vma_max;
123 int available_fences;
126 unsigned int has_bsd : 1;
127 unsigned int has_blt : 1;
128 unsigned int has_relaxed_fencing : 1;
129 unsigned int has_llc : 1;
130 unsigned int has_wait_timeout : 1;
131 unsigned int bo_reuse : 1;
132 unsigned int no_exec : 1;
133 unsigned int has_vebox : 1;
144 } drm_intel_bufmgr_gem;
146 #define DRM_INTEL_RELOC_FENCE (1<<0)
148 typedef struct _drm_intel_reloc_target_info {
151 } drm_intel_reloc_target;
153 struct _drm_intel_bo_gem {
161 * Kernel-assigned global name for this object
163 * List contains both flink-named and prime-fd'd objects
165 unsigned int global_name;
166 drmMMListHead name_list;
169 * Index of the buffer within the validation list while preparing a
170 * batchbuffer execution.
175 * Current tiling mode
177 uint32_t tiling_mode;
178 uint32_t swizzle_mode;
179 unsigned long stride;
183 /** Array passed to the DRM containing relocation information. */
184 struct drm_i915_gem_relocation_entry *relocs;
186 * Array of info structs corresponding to relocs[i].target_handle etc
188 drm_intel_reloc_target *reloc_target_info;
189 /** Number of entries in relocs */
191 /** Mapped address for the buffer, saved across map/unmap cycles */
193 /** GTT virtual address for the buffer, saved across map/unmap cycles */
196 * Virtual address of the buffer allocated by user, used for userptr
201 drmMMListHead vma_list;
207 * Boolean of whether this BO and its children have been included in
208 * the current drm_intel_bufmgr_check_aperture_space() total.
210 bool included_in_check_aperture;
213 * Boolean of whether this buffer has been used as a relocation
214 * target and had its size accounted for, and thus can't have any
215 * further relocations added to it.
217 bool used_as_reloc_target;
220 * Boolean of whether we have encountered an error whilst building the relocation tree.
225 * Boolean of whether this buffer can be re-used
230 * Boolean of whether the GPU is definitely not accessing the buffer.
232 * This is only valid when reusable, since non-reusable
233 * buffers are those that have been shared with other
234 * processes, so we don't know their state.
239 * Boolean of whether this buffer was allocated with userptr
244 * Size in bytes of this buffer and its relocation descendants.
246 * Used to avoid costly tree walking in
247 * drm_intel_bufmgr_check_aperture in the common case.
252 * Number of potential fence registers required by this buffer and its
255 int reloc_tree_fences;
257 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
258 bool mapped_cpu_write;
262 drm_intel_aub_annotation *aub_annotations;
263 unsigned aub_annotation_count;
267 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
270 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
273 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
274 uint32_t * swizzle_mode);
277 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
278 uint32_t tiling_mode,
281 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
284 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
286 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
289 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
290 uint32_t *tiling_mode)
292 unsigned long min_size, max_size;
295 if (*tiling_mode == I915_TILING_NONE)
298 /* 965+ just need multiples of page size for tiling */
299 if (bufmgr_gem->gen >= 4)
300 return ROUND_UP_TO(size, 4096);
302 /* Older chips need powers of two, of at least 512k or 1M */
303 if (bufmgr_gem->gen == 3) {
304 min_size = 1024*1024;
305 max_size = 128*1024*1024;
308 max_size = 64*1024*1024;
311 if (size > max_size) {
312 *tiling_mode = I915_TILING_NONE;
316 /* Do we need to allocate every page for the fence? */
317 if (bufmgr_gem->has_relaxed_fencing)
318 return ROUND_UP_TO(size, 4096);
320 for (i = min_size; i < size; i <<= 1)
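	/*
	 * Worked example (illustrative, not in the original source): on gen3
	 * without relaxed fencing, min_size is 1MB, so a 1.5MB tiled request
	 * doubles 1MB -> 2MB and the function returns 2MB; a 3MB request
	 * returns 4MB.  On gen4+ the same requests would simply round up to
	 * the next 4096-byte multiple instead.
	 */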
327 * Round a given pitch up to the minimum required for X tiling on a
328 * given chip. We use 512 as the minimum to allow for a later tiling
332 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
333 unsigned long pitch, uint32_t *tiling_mode)
335 unsigned long tile_width;
338 /* If untiled, then just align it so that we can do rendering
339 * to it with the 3D engine.
341 if (*tiling_mode == I915_TILING_NONE)
342 return ALIGN(pitch, 64);
344 if (*tiling_mode == I915_TILING_X
345 || (IS_915(bufmgr_gem->pci_device)
346 && *tiling_mode == I915_TILING_Y))
351 /* 965 is flexible */
352 if (bufmgr_gem->gen >= 4)
353 return ROUND_UP_TO(pitch, tile_width);
355 /* The older hardware has a maximum pitch of 8192 with tiled
356 * surfaces, so fall back to untiled if it's too large.
359 *tiling_mode = I915_TILING_NONE;
360 return ALIGN(pitch, 64);
363 /* Pre-965 needs a power-of-two tile width */
364 for (i = tile_width; i < pitch; i <<= 1)
370 static struct drm_intel_gem_bo_bucket *
371 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
376 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
377 struct drm_intel_gem_bo_bucket *bucket =
378 &bufmgr_gem->cache_bucket[i];
379 if (bucket->size >= size) {
388 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
392 for (i = 0; i < bufmgr_gem->exec_count; i++) {
393 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
394 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
396 if (bo_gem->relocs == NULL) {
397 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
402 for (j = 0; j < bo_gem->reloc_count; j++) {
403 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
404 drm_intel_bo_gem *target_gem =
405 (drm_intel_bo_gem *) target_bo;
407 DBG("%2d: %d (%s)@0x%08llx -> "
408 "%d (%s)@0x%08lx + 0x%08x\n",
410 bo_gem->gem_handle, bo_gem->name,
411 (unsigned long long)bo_gem->relocs[j].offset,
412 target_gem->gem_handle,
415 bo_gem->relocs[j].delta);
421 drm_intel_gem_bo_reference(drm_intel_bo *bo)
423 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
425 atomic_inc(&bo_gem->refcount);
429 * Adds the given buffer to the list of buffers to be validated (moved into the
430 * appropriate memory type) with the next batch submission.
432 * If a buffer is validated multiple times in a batch submission, it ends up
433 * with the intersection of the memory type flags and the union of the
437 drm_intel_add_validate_buffer(drm_intel_bo *bo)
439 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
440 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
443 if (bo_gem->validate_index != -1)
446 /* Extend the array of validation entries as necessary. */
447 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
448 int new_size = bufmgr_gem->exec_size * 2;
453 bufmgr_gem->exec_objects =
454 realloc(bufmgr_gem->exec_objects,
455 sizeof(*bufmgr_gem->exec_objects) * new_size);
456 bufmgr_gem->exec_bos =
457 realloc(bufmgr_gem->exec_bos,
458 sizeof(*bufmgr_gem->exec_bos) * new_size);
459 bufmgr_gem->exec_size = new_size;
462 index = bufmgr_gem->exec_count;
463 bo_gem->validate_index = index;
464 /* Fill in array entry */
465 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
466 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
467 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
468 bufmgr_gem->exec_objects[index].alignment = bo->align;
469 bufmgr_gem->exec_objects[index].offset = 0;
470 bufmgr_gem->exec_bos[index] = bo;
471 bufmgr_gem->exec_count++;
475 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
477 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
478 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
481 if (bo_gem->validate_index != -1) {
483 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
484 EXEC_OBJECT_NEEDS_FENCE;
488 /* Extend the array of validation entries as necessary. */
489 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
490 int new_size = bufmgr_gem->exec_size * 2;
495 bufmgr_gem->exec2_objects =
496 realloc(bufmgr_gem->exec2_objects,
497 sizeof(*bufmgr_gem->exec2_objects) * new_size);
498 bufmgr_gem->exec_bos =
499 realloc(bufmgr_gem->exec_bos,
500 sizeof(*bufmgr_gem->exec_bos) * new_size);
501 bufmgr_gem->exec_size = new_size;
504 index = bufmgr_gem->exec_count;
505 bo_gem->validate_index = index;
506 /* Fill in array entry */
507 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
508 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
509 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
510 bufmgr_gem->exec2_objects[index].alignment = bo->align;
511 bufmgr_gem->exec2_objects[index].offset = 0;
512 bufmgr_gem->exec_bos[index] = bo;
513 bufmgr_gem->exec2_objects[index].flags = 0;
514 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
515 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
517 bufmgr_gem->exec2_objects[index].flags |=
518 EXEC_OBJECT_NEEDS_FENCE;
520 bufmgr_gem->exec_count++;
523 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
527 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
528 drm_intel_bo_gem *bo_gem,
529 unsigned int alignment)
533 assert(!bo_gem->used_as_reloc_target);
535 /* The older chipsets are far less flexible in terms of tiling,
536 * and require tiled buffers to be size-aligned in the aperture.
537 * This means that in the worst possible case we will need a hole
538 * twice as large as the object in order for it to fit into the
539 * aperture. Optimal packing is for wimps.
541 size = bo_gem->bo.size;
542 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
543 unsigned int min_size;
545 if (bufmgr_gem->has_relaxed_fencing) {
546 if (bufmgr_gem->gen == 3)
547 min_size = 1024*1024;
551 while (min_size < size)
556 /* Account for worst-case alignment. */
557 alignment = MAX2(alignment, min_size);
560 bo_gem->reloc_tree_size = size + alignment;
564 drm_intel_setup_reloc_list(drm_intel_bo *bo)
566 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
567 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
568 unsigned int max_relocs = bufmgr_gem->max_relocs;
570 if (bo->size / 4 < max_relocs)
571 max_relocs = bo->size / 4;
573 bo_gem->relocs = malloc(max_relocs *
574 sizeof(struct drm_i915_gem_relocation_entry));
575 bo_gem->reloc_target_info = malloc(max_relocs *
576 sizeof(drm_intel_reloc_target));
577 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
578 bo_gem->has_error = true;
580 free (bo_gem->relocs);
581 bo_gem->relocs = NULL;
583 free (bo_gem->reloc_target_info);
584 bo_gem->reloc_target_info = NULL;
593 drm_intel_gem_bo_busy(drm_intel_bo *bo)
595 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
596 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
597 struct drm_i915_gem_busy busy;
600 if (bo_gem->reusable && bo_gem->idle)
604 busy.handle = bo_gem->gem_handle;
606 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
608 bo_gem->idle = !busy.busy;
613 return (ret == 0 && busy.busy);
617 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
618 drm_intel_bo_gem *bo_gem, int state)
620 struct drm_i915_gem_madvise madv;
623 madv.handle = bo_gem->gem_handle;
626 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
628 return madv.retained;
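/*
 * Illustrative sketch (not part of the original file): how the madvise
 * helper above is used by the BO cache.  I915_MADV_DONTNEED marks a cached
 * BO as purgeable; when it is pulled back out of the cache, I915_MADV_WILLNEED
 * is issued, and a zero "retained" value means the kernel already discarded
 * the pages, so the BO must be freed rather than reused:
 *
 *	if (!drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
 *					       I915_MADV_WILLNEED)) {
 *		drm_intel_gem_bo_free(&bo_gem->bo);
 *		// and purge the rest of the bucket as well
 *	}
 */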
632 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
634 return drm_intel_gem_bo_madvise_internal
635 ((drm_intel_bufmgr_gem *) bo->bufmgr,
636 (drm_intel_bo_gem *) bo,
640 /* drop the oldest entries that have been purged by the kernel */
642 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
643 struct drm_intel_gem_bo_bucket *bucket)
645 while (!DRMLISTEMPTY(&bucket->head)) {
646 drm_intel_bo_gem *bo_gem;
648 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
649 bucket->head.next, head);
650 if (drm_intel_gem_bo_madvise_internal
651 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
654 DRMLISTDEL(&bo_gem->head);
655 drm_intel_gem_bo_free(&bo_gem->bo);
659 static drm_intel_bo *
660 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
664 uint32_t tiling_mode,
665 unsigned long stride,
666 unsigned int alignment)
668 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
669 drm_intel_bo_gem *bo_gem;
670 unsigned int page_size = getpagesize();
672 struct drm_intel_gem_bo_bucket *bucket;
673 bool alloc_from_cache;
674 unsigned long bo_size;
675 bool for_render = false;
677 if (flags & BO_ALLOC_FOR_RENDER)
680 /* Round the allocated size up to a power of two number of pages. */
681 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
683 /* If we don't have caching at this size, don't actually round the
686 if (bucket == NULL) {
688 if (bo_size < page_size)
691 bo_size = bucket->size;
694 pthread_mutex_lock(&bufmgr_gem->lock);
695 /* Get a buffer out of the cache if available */
697 alloc_from_cache = false;
698 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
700 /* Allocate new render-target BOs from the tail (MRU)
701 * of the list, as it will likely be hot in the GPU
702 * cache and in the aperture for us.
704 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
705 bucket->head.prev, head);
706 DRMLISTDEL(&bo_gem->head);
707 alloc_from_cache = true;
708 bo_gem->bo.align = alignment;
710 assert(alignment == 0);
711 /* For non-render-target BOs (where we're probably
712 * going to map it first thing in order to fill it
713 * with data), check if the last BO in the cache is
714 * unbusy, and only reuse in that case. Otherwise,
715 * allocating a new buffer is probably faster than
716 * waiting for the GPU to finish.
718 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
719 bucket->head.next, head);
720 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
721 alloc_from_cache = true;
722 DRMLISTDEL(&bo_gem->head);
726 if (alloc_from_cache) {
727 if (!drm_intel_gem_bo_madvise_internal
728 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
729 drm_intel_gem_bo_free(&bo_gem->bo);
730 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
735 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
738 drm_intel_gem_bo_free(&bo_gem->bo);
743 pthread_mutex_unlock(&bufmgr_gem->lock);
745 if (!alloc_from_cache) {
746 struct drm_i915_gem_create create;
748 bo_gem = calloc(1, sizeof(*bo_gem));
752 bo_gem->bo.size = bo_size;
755 create.size = bo_size;
757 ret = drmIoctl(bufmgr_gem->fd,
758 DRM_IOCTL_I915_GEM_CREATE,
760 bo_gem->gem_handle = create.handle;
761 bo_gem->bo.handle = bo_gem->gem_handle;
766 bo_gem->bo.bufmgr = bufmgr;
767 bo_gem->bo.align = alignment;
769 bo_gem->tiling_mode = I915_TILING_NONE;
770 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
773 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
774 list (vma_list), so initialize the list heads here first */
775 DRMINITLISTHEAD(&bo_gem->name_list);
776 DRMINITLISTHEAD(&bo_gem->vma_list);
777 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
780 drm_intel_gem_bo_free(&bo_gem->bo);
786 atomic_set(&bo_gem->refcount, 1);
787 bo_gem->validate_index = -1;
788 bo_gem->reloc_tree_fences = 0;
789 bo_gem->used_as_reloc_target = false;
790 bo_gem->has_error = false;
791 bo_gem->reusable = true;
792 bo_gem->aub_annotations = NULL;
793 bo_gem->aub_annotation_count = 0;
795 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
797 DBG("bo_create: buf %d (%s) %ldb\n",
798 bo_gem->gem_handle, bo_gem->name, size);
803 static drm_intel_bo *
804 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
807 unsigned int alignment)
809 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
815 static drm_intel_bo *
816 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
819 unsigned int alignment)
821 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
822 I915_TILING_NONE, 0, 0);
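/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally go through the public intel_bufmgr.h entry points, e.g.
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
 *	if (bo) {
 *		... use the buffer ...
 *		drm_intel_bo_unreference(bo);
 *	}
 *
 * which dispatches to drm_intel_gem_bo_alloc() above for GEM buffer
 * managers.
 */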
825 static drm_intel_bo *
826 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
827 int x, int y, int cpp, uint32_t *tiling_mode,
828 unsigned long *pitch, unsigned long flags)
830 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
831 unsigned long size, stride;
835 unsigned long aligned_y, height_alignment;
837 tiling = *tiling_mode;
839 /* If we're tiled, our allocations are in 8 or 32-row blocks,
840 * so failure to align our height means that we won't allocate
843 * If we're untiled, we still have to align to 2 rows high
844 * because the data port accesses 2x2 blocks even if the
845 * bottom row isn't to be rendered, so failure to align means
846 * we could walk off the end of the GTT and fault. This is
847 * documented on 965, and may be the case on older chipsets
848 * too, so we try to be careful.
851 height_alignment = 2;
853 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
854 height_alignment = 16;
855 else if (tiling == I915_TILING_X
856 || (IS_915(bufmgr_gem->pci_device)
857 && tiling == I915_TILING_Y))
858 height_alignment = 8;
859 else if (tiling == I915_TILING_Y)
860 height_alignment = 32;
861 aligned_y = ALIGN(y, height_alignment);
864 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
865 size = stride * aligned_y;
866 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
867 } while (*tiling_mode != tiling);
870 if (tiling == I915_TILING_NONE)
873 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
877 static drm_intel_bo *
878 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
881 uint32_t tiling_mode,
886 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
887 drm_intel_bo_gem *bo_gem;
889 struct drm_i915_gem_userptr userptr;
891 /* Tiling with userptr surfaces is not supported
892 * on all hardware, so refuse it for the time being.
894 if (tiling_mode != I915_TILING_NONE)
897 bo_gem = calloc(1, sizeof(*bo_gem));
901 bo_gem->bo.size = size;
904 userptr.user_ptr = (__u64)((unsigned long)addr);
905 userptr.user_size = size;
906 userptr.flags = flags;
908 ret = drmIoctl(bufmgr_gem->fd,
909 DRM_IOCTL_I915_GEM_USERPTR,
912 DBG("bo_create_userptr: "
913 "ioctl failed with user ptr %p size 0x%lx, "
914 "user flags 0x%lx\n", addr, size, flags);
919 bo_gem->gem_handle = userptr.handle;
920 bo_gem->bo.handle = bo_gem->gem_handle;
921 bo_gem->bo.bufmgr = bufmgr;
922 bo_gem->is_userptr = true;
923 bo_gem->bo.virtual = addr;
924 /* Save the address provided by user */
925 bo_gem->user_virtual = addr;
926 bo_gem->tiling_mode = I915_TILING_NONE;
927 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
930 DRMINITLISTHEAD(&bo_gem->name_list);
931 DRMINITLISTHEAD(&bo_gem->vma_list);
934 atomic_set(&bo_gem->refcount, 1);
935 bo_gem->validate_index = -1;
936 bo_gem->reloc_tree_fences = 0;
937 bo_gem->used_as_reloc_target = false;
938 bo_gem->has_error = false;
939 bo_gem->reusable = false;
941 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
943 DBG("bo_create_userptr: "
944 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
945 addr, bo_gem->gem_handle, bo_gem->name,
946 size, stride, tiling_mode);
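/*
 * Usage sketch (illustrative): wrapping an existing page-aligned allocation
 * as a GEM object via the public intel_bufmgr.h wrapper.  The pointer and
 * size below are placeholders for the example.
 *
 *	void *ptr;
 *	posix_memalign(&ptr, 4096, 64 * 4096);
 *	drm_intel_bo *bo = drm_intel_bo_alloc_userptr(bufmgr, "wrapped", ptr,
 *						      I915_TILING_NONE, 0,
 *						      64 * 4096, 0);
 */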
952 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
957 struct drm_i915_gem_userptr userptr;
959 pgsz = sysconf(_SC_PAGESIZE);
962 ret = posix_memalign(&ptr, pgsz, pgsz);
964 DBG("Failed to get a page (%ld) for userptr detection!\n",
970 userptr.user_ptr = (__u64)(unsigned long)ptr;
971 userptr.user_size = pgsz;
974 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
976 if (errno == ENODEV && userptr.flags == 0) {
977 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
984 /* We don't release the userptr bo here as we want to keep the
985 * kernel mm tracking alive for our lifetime. The first time we
986 * create a userptr object the kernel has to install an mmu_notifier
987 * which is a heavyweight operation (e.g. it requires taking all
988 * mm_locks and stop_machine()).
991 bufmgr_gem->userptr_active.ptr = ptr;
992 bufmgr_gem->userptr_active.handle = userptr.handle;
997 static drm_intel_bo *
998 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1001 uint32_t tiling_mode,
1004 unsigned long flags)
1006 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1007 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1009 bufmgr->bo_alloc_userptr = NULL;
1011 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1012 tiling_mode, stride, size, flags);
1016 * Returns a drm_intel_bo wrapping the given buffer object handle.
1018 * This can be used when one application needs to pass a buffer object
1022 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1024 unsigned int handle)
1026 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1027 drm_intel_bo_gem *bo_gem;
1029 struct drm_gem_open open_arg;
1030 struct drm_i915_gem_get_tiling get_tiling;
1031 drmMMListHead *list;
1033 /* At the moment most applications only have a few named bos.
1034 * For instance, in a DRI client only the render buffers passed
1035 * between X and the client are named. And since X returns the
1036 * alternating names for the front/back buffer a linear search
1037 * provides a sufficiently fast match.
1039 pthread_mutex_lock(&bufmgr_gem->lock);
1040 for (list = bufmgr_gem->named.next;
1041 list != &bufmgr_gem->named;
1042 list = list->next) {
1043 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1044 if (bo_gem->global_name == handle) {
1045 drm_intel_gem_bo_reference(&bo_gem->bo);
1046 pthread_mutex_unlock(&bufmgr_gem->lock);
1052 open_arg.name = handle;
1053 ret = drmIoctl(bufmgr_gem->fd,
1057 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1058 name, handle, strerror(errno));
1059 pthread_mutex_unlock(&bufmgr_gem->lock);
1062 /* Now see if someone has used a prime handle to get this
1063 * object from the kernel before by looking through the list
1064 * again for a matching gem_handle
1066 for (list = bufmgr_gem->named.next;
1067 list != &bufmgr_gem->named;
1068 list = list->next) {
1069 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1070 if (bo_gem->gem_handle == open_arg.handle) {
1071 drm_intel_gem_bo_reference(&bo_gem->bo);
1072 pthread_mutex_unlock(&bufmgr_gem->lock);
1077 bo_gem = calloc(1, sizeof(*bo_gem));
1079 pthread_mutex_unlock(&bufmgr_gem->lock);
1083 bo_gem->bo.size = open_arg.size;
1084 bo_gem->bo.offset = 0;
1085 bo_gem->bo.offset64 = 0;
1086 bo_gem->bo.virtual = NULL;
1087 bo_gem->bo.bufmgr = bufmgr;
1088 bo_gem->name = name;
1089 atomic_set(&bo_gem->refcount, 1);
1090 bo_gem->validate_index = -1;
1091 bo_gem->gem_handle = open_arg.handle;
1092 bo_gem->bo.handle = open_arg.handle;
1093 bo_gem->global_name = handle;
1094 bo_gem->reusable = false;
1096 memclear(get_tiling);
1097 get_tiling.handle = bo_gem->gem_handle;
1098 ret = drmIoctl(bufmgr_gem->fd,
1099 DRM_IOCTL_I915_GEM_GET_TILING,
1102 drm_intel_gem_bo_unreference(&bo_gem->bo);
1103 pthread_mutex_unlock(&bufmgr_gem->lock);
1106 bo_gem->tiling_mode = get_tiling.tiling_mode;
1107 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1108 /* XXX stride is unknown */
1109 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1111 DRMINITLISTHEAD(&bo_gem->vma_list);
1112 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1113 pthread_mutex_unlock(&bufmgr_gem->lock);
1114 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1120 drm_intel_gem_bo_free(drm_intel_bo *bo)
1122 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1123 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1124 struct drm_gem_close close;
1127 DRMLISTDEL(&bo_gem->vma_list);
1128 if (bo_gem->mem_virtual) {
1129 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1130 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1131 bufmgr_gem->vma_count--;
1133 if (bo_gem->gtt_virtual) {
1134 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1135 bufmgr_gem->vma_count--;
1138 /* Close this object */
1140 close.handle = bo_gem->gem_handle;
1141 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1143 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1144 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1146 free(bo_gem->aub_annotations);
1151 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1154 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1156 if (bo_gem->mem_virtual)
1157 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1159 if (bo_gem->gtt_virtual)
1160 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1164 /** Frees all cached buffers significantly older than @time. */
1166 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1170 if (bufmgr_gem->time == time)
1173 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1174 struct drm_intel_gem_bo_bucket *bucket =
1175 &bufmgr_gem->cache_bucket[i];
1177 while (!DRMLISTEMPTY(&bucket->head)) {
1178 drm_intel_bo_gem *bo_gem;
1180 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1181 bucket->head.next, head);
1182 if (time - bo_gem->free_time <= 1)
1185 DRMLISTDEL(&bo_gem->head);
1187 drm_intel_gem_bo_free(&bo_gem->bo);
1191 bufmgr_gem->time = time;
1194 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1198 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1199 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1201 if (bufmgr_gem->vma_max < 0)
1204 /* We may need to evict a few entries in order to create new mmaps */
1205 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1209 while (bufmgr_gem->vma_count > limit) {
1210 drm_intel_bo_gem *bo_gem;
1212 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1213 bufmgr_gem->vma_cache.next,
1215 assert(bo_gem->map_count == 0);
1216 DRMLISTDELINIT(&bo_gem->vma_list);
1218 if (bo_gem->mem_virtual) {
1219 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1220 bo_gem->mem_virtual = NULL;
1221 bufmgr_gem->vma_count--;
1223 if (bo_gem->gtt_virtual) {
1224 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1225 bo_gem->gtt_virtual = NULL;
1226 bufmgr_gem->vma_count--;
1231 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1232 drm_intel_bo_gem *bo_gem)
1234 bufmgr_gem->vma_open--;
1235 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1236 if (bo_gem->mem_virtual)
1237 bufmgr_gem->vma_count++;
1238 if (bo_gem->gtt_virtual)
1239 bufmgr_gem->vma_count++;
1240 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1243 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1244 drm_intel_bo_gem *bo_gem)
1246 bufmgr_gem->vma_open++;
1247 DRMLISTDEL(&bo_gem->vma_list);
1248 if (bo_gem->mem_virtual)
1249 bufmgr_gem->vma_count--;
1250 if (bo_gem->gtt_virtual)
1251 bufmgr_gem->vma_count--;
1252 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1256 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1258 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1259 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1260 struct drm_intel_gem_bo_bucket *bucket;
1263 /* Unreference all the target buffers */
1264 for (i = 0; i < bo_gem->reloc_count; i++) {
1265 if (bo_gem->reloc_target_info[i].bo != bo) {
1266 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1267 reloc_target_info[i].bo,
1271 bo_gem->reloc_count = 0;
1272 bo_gem->used_as_reloc_target = false;
1274 DBG("bo_unreference final: %d (%s)\n",
1275 bo_gem->gem_handle, bo_gem->name);
1277 /* release memory associated with this object */
1278 if (bo_gem->reloc_target_info) {
1279 free(bo_gem->reloc_target_info);
1280 bo_gem->reloc_target_info = NULL;
1282 if (bo_gem->relocs) {
1283 free(bo_gem->relocs);
1284 bo_gem->relocs = NULL;
1287 /* Clear any left-over mappings */
1288 if (bo_gem->map_count) {
1289 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1290 bo_gem->map_count = 0;
1291 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1292 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1295 DRMLISTDEL(&bo_gem->name_list);
1297 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1298 /* Put the buffer into our internal cache for reuse if we can. */
1299 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1300 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1301 I915_MADV_DONTNEED)) {
1302 bo_gem->free_time = time;
1304 bo_gem->name = NULL;
1305 bo_gem->validate_index = -1;
1307 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1309 drm_intel_gem_bo_free(bo);
1313 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1316 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1318 assert(atomic_read(&bo_gem->refcount) > 0);
1319 if (atomic_dec_and_test(&bo_gem->refcount))
1320 drm_intel_gem_bo_unreference_final(bo, time);
1323 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1325 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1327 assert(atomic_read(&bo_gem->refcount) > 0);
1329 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1330 drm_intel_bufmgr_gem *bufmgr_gem =
1331 (drm_intel_bufmgr_gem *) bo->bufmgr;
1332 struct timespec time;
1334 clock_gettime(CLOCK_MONOTONIC, &time);
1336 pthread_mutex_lock(&bufmgr_gem->lock);
1338 if (atomic_dec_and_test(&bo_gem->refcount)) {
1339 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1340 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1343 pthread_mutex_unlock(&bufmgr_gem->lock);
1347 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1349 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1350 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1351 struct drm_i915_gem_set_domain set_domain;
1354 if (bo_gem->is_userptr) {
1355 /* Return the same user ptr */
1356 bo->virtual = bo_gem->user_virtual;
1360 pthread_mutex_lock(&bufmgr_gem->lock);
1362 if (bo_gem->map_count++ == 0)
1363 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1365 if (!bo_gem->mem_virtual) {
1366 struct drm_i915_gem_mmap mmap_arg;
1368 DBG("bo_map: %d (%s), map_count=%d\n",
1369 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1372 mmap_arg.handle = bo_gem->gem_handle;
1373 mmap_arg.size = bo->size;
1374 ret = drmIoctl(bufmgr_gem->fd,
1375 DRM_IOCTL_I915_GEM_MMAP,
1379 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1380 __FILE__, __LINE__, bo_gem->gem_handle,
1381 bo_gem->name, strerror(errno));
1382 if (--bo_gem->map_count == 0)
1383 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1384 pthread_mutex_unlock(&bufmgr_gem->lock);
1387 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1388 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1390 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1391 bo_gem->mem_virtual);
1392 bo->virtual = bo_gem->mem_virtual;
1394 memclear(set_domain);
1395 set_domain.handle = bo_gem->gem_handle;
1396 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1398 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1400 set_domain.write_domain = 0;
1401 ret = drmIoctl(bufmgr_gem->fd,
1402 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1405 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1406 __FILE__, __LINE__, bo_gem->gem_handle,
1411 bo_gem->mapped_cpu_write = true;
1413 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1414 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1415 pthread_mutex_unlock(&bufmgr_gem->lock);
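/*
 * Usage sketch (illustrative): a typical CPU write through the mapping
 * above, using the public wrappers from intel_bufmgr.h.  "data" and "len"
 * are placeholders for the example.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memcpy(bo->virtual, data, len);
 *		drm_intel_bo_unmap(bo);
 *	}
 */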
1421 map_gtt(drm_intel_bo *bo)
1423 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1424 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1427 if (bo_gem->is_userptr)
1430 if (bo_gem->map_count++ == 0)
1431 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1433 /* Get a mapping of the buffer if we haven't before. */
1434 if (bo_gem->gtt_virtual == NULL) {
1435 struct drm_i915_gem_mmap_gtt mmap_arg;
1437 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1438 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1441 mmap_arg.handle = bo_gem->gem_handle;
1443 /* Get the fake offset back... */
1444 ret = drmIoctl(bufmgr_gem->fd,
1445 DRM_IOCTL_I915_GEM_MMAP_GTT,
1449 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1451 bo_gem->gem_handle, bo_gem->name,
1453 if (--bo_gem->map_count == 0)
1454 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1459 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1460 MAP_SHARED, bufmgr_gem->fd,
1462 if (bo_gem->gtt_virtual == MAP_FAILED) {
1463 bo_gem->gtt_virtual = NULL;
1465 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1467 bo_gem->gem_handle, bo_gem->name,
1469 if (--bo_gem->map_count == 0)
1470 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1475 bo->virtual = bo_gem->gtt_virtual;
1477 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1478 bo_gem->gtt_virtual);
1484 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1486 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1487 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1488 struct drm_i915_gem_set_domain set_domain;
1491 pthread_mutex_lock(&bufmgr_gem->lock);
1495 pthread_mutex_unlock(&bufmgr_gem->lock);
1499 /* Now move it to the GTT domain so that the GPU and CPU
1500 * caches are flushed and the GPU isn't actively using the
1503 * The pagefault handler does this domain change for us when
1504 * it has unbound the BO from the GTT, but it's up to us to
1505 * tell it when we're about to use things if we had done
1506 * rendering and it still happens to be bound to the GTT.
1508 memclear(set_domain);
1509 set_domain.handle = bo_gem->gem_handle;
1510 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1511 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1512 ret = drmIoctl(bufmgr_gem->fd,
1513 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1516 DBG("%s:%d: Error setting domain %d: %s\n",
1517 __FILE__, __LINE__, bo_gem->gem_handle,
1521 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1522 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1523 pthread_mutex_unlock(&bufmgr_gem->lock);
1529 * Performs a mapping of the buffer object like the normal GTT
1530 * mapping, but avoids waiting for the GPU to be done reading from or
1531 * rendering to the buffer.
1533 * This is used in the implementation of GL_ARB_map_buffer_range: The
1534 * user asks to create a buffer, then does a mapping, fills some
1535 * space, runs a drawing command, then asks to map it again without
1536 * synchronizing because it guarantees that it won't write over the
1537 * data that the GPU is busy using (or, more specifically, that if it
1538 * does write over the data, it acknowledges that rendering is
1543 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1545 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1546 #ifdef HAVE_VALGRIND
1547 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1551 /* If the CPU cache isn't coherent with the GTT, then use a
1552 * regular synchronized mapping. The problem is that we don't
1553 * track where the buffer was last used on the CPU side in
1554 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1555 * we would potentially corrupt the buffer even when the user
1556 * does reasonable things.
1558 if (!bufmgr_gem->has_llc)
1559 return drm_intel_gem_bo_map_gtt(bo);
1561 pthread_mutex_lock(&bufmgr_gem->lock);
1565 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1566 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1569 pthread_mutex_unlock(&bufmgr_gem->lock);
1574 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1576 drm_intel_bufmgr_gem *bufmgr_gem;
1577 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1583 if (bo_gem->is_userptr)
1586 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1588 pthread_mutex_lock(&bufmgr_gem->lock);
1590 if (bo_gem->map_count <= 0) {
1591 DBG("attempted to unmap an unmapped bo\n");
1592 pthread_mutex_unlock(&bufmgr_gem->lock);
1593 /* Preserve the old behaviour of just treating this as a
1594 * no-op rather than reporting the error.
1599 if (bo_gem->mapped_cpu_write) {
1600 struct drm_i915_gem_sw_finish sw_finish;
1602 /* Cause a flush to happen if the buffer's pinned for
1603 * scanout, so the results show up in a timely manner.
1604 * Unlike GTT set domains, this only does work if the
1605 * buffer should be scanout-related.
1607 memclear(sw_finish);
1608 sw_finish.handle = bo_gem->gem_handle;
1609 ret = drmIoctl(bufmgr_gem->fd,
1610 DRM_IOCTL_I915_GEM_SW_FINISH,
1612 ret = ret == -1 ? -errno : 0;
1614 bo_gem->mapped_cpu_write = false;
1617 /* We need to unmap after every invocation as we cannot track
1618 * an open vma for every bo, as that would exhaust the system
1619 * limits and cause later failures.
1621 if (--bo_gem->map_count == 0) {
1622 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1623 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1626 pthread_mutex_unlock(&bufmgr_gem->lock);
1632 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1634 return drm_intel_gem_bo_unmap(bo);
1638 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1639 unsigned long size, const void *data)
1641 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1642 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1643 struct drm_i915_gem_pwrite pwrite;
1646 if (bo_gem->is_userptr)
1650 pwrite.handle = bo_gem->gem_handle;
1651 pwrite.offset = offset;
1653 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1654 ret = drmIoctl(bufmgr_gem->fd,
1655 DRM_IOCTL_I915_GEM_PWRITE,
1659 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1660 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1661 (int)size, strerror(errno));
1668 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1670 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1671 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1674 memclear(get_pipe_from_crtc_id);
1675 get_pipe_from_crtc_id.crtc_id = crtc_id;
1676 ret = drmIoctl(bufmgr_gem->fd,
1677 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1678 &get_pipe_from_crtc_id);
1680 /* We return -1 here to signal that we don't
1681 * know which pipe is associated with this crtc.
1682 * This lets the caller know that this information
1683 * isn't available; using the wrong pipe for
1684 * vblank waiting can cause the chipset to lock up
1689 return get_pipe_from_crtc_id.pipe;
1693 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1694 unsigned long size, void *data)
1696 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1697 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1698 struct drm_i915_gem_pread pread;
1701 if (bo_gem->is_userptr)
1705 pread.handle = bo_gem->gem_handle;
1706 pread.offset = offset;
1708 pread.data_ptr = (uint64_t) (uintptr_t) data;
1709 ret = drmIoctl(bufmgr_gem->fd,
1710 DRM_IOCTL_I915_GEM_PREAD,
1714 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1715 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1716 (int)size, strerror(errno));
1722 /** Waits for all GPU rendering with the object to have completed. */
1724 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1726 drm_intel_gem_bo_start_gtt_access(bo, 1);
1730 * Waits on a BO for the given amount of time.
1732 * @bo: buffer object to wait for
1733 * @timeout_ns: amount of time to wait in nanoseconds.
1734 * If value is less than 0, an infinite wait will occur.
1736 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1737 * object has completed within the allotted time. Otherwise some negative return
1738 * value describes the error. Of particular interest is -ETIME when the wait has
1739 * failed to yield the desired result.
1741 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1742 * the operation to give up after a certain amount of time. Another subtle
1743 * difference is that the internal locking semantics differ (this variant does
1744 * not hold the lock for the duration of the wait). This makes the wait subject
1745 * to a larger userspace race window.
1747 * The implementation shall wait until the object is no longer actively
1748 * referenced within a batch buffer at the time of the call. The wait does
1749 * not guard against the buffer being re-issued via another thread or a flinked
1750 * handle. Userspace must make sure this race does not occur if such precision
1753 * Note that some kernels have broken the promise of an infinite wait for
1754 * negative values; upgrade to the latest stable kernel if this is the case.
1757 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1759 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1760 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1761 struct drm_i915_gem_wait wait;
1764 if (!bufmgr_gem->has_wait_timeout) {
1765 DBG("%s:%d: Timed wait is not supported. Falling back to "
1766 "infinite wait\n", __FILE__, __LINE__);
1768 drm_intel_gem_bo_wait_rendering(bo);
1771 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1776 wait.bo_handle = bo_gem->gem_handle;
1777 wait.timeout_ns = timeout_ns;
1778 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
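/*
 * Usage sketch (illustrative): a bounded wait on a BO.  A negative timeout
 * requests an infinite wait; -ETIME indicates the timeout expired before the
 * GPU finished with the buffer.
 *
 *	int ret = drm_intel_gem_bo_wait(bo, 500 * 1000 * 1000);  // 500 ms
 *	if (ret == -ETIME)
 *		;  // still busy, try again later
 */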
1786 * Sets the object to the GTT read and possibly write domain, used by the X
1787 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1789 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1790 * can do tiled pixmaps this way.
1793 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1795 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1796 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1797 struct drm_i915_gem_set_domain set_domain;
1800 memclear(set_domain);
1801 set_domain.handle = bo_gem->gem_handle;
1802 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1803 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1804 ret = drmIoctl(bufmgr_gem->fd,
1805 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1808 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1809 __FILE__, __LINE__, bo_gem->gem_handle,
1810 set_domain.read_domains, set_domain.write_domain,
1816 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1818 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1819 struct drm_gem_close close_bo;
1822 free(bufmgr_gem->exec2_objects);
1823 free(bufmgr_gem->exec_objects);
1824 free(bufmgr_gem->exec_bos);
1825 free(bufmgr_gem->aub_filename);
1827 pthread_mutex_destroy(&bufmgr_gem->lock);
1829 /* Free any cached buffer objects we were going to reuse */
1830 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1831 struct drm_intel_gem_bo_bucket *bucket =
1832 &bufmgr_gem->cache_bucket[i];
1833 drm_intel_bo_gem *bo_gem;
1835 while (!DRMLISTEMPTY(&bucket->head)) {
1836 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1837 bucket->head.next, head);
1838 DRMLISTDEL(&bo_gem->head);
1840 drm_intel_gem_bo_free(&bo_gem->bo);
1844 /* Release userptr bo kept hanging around for optimisation. */
1845 if (bufmgr_gem->userptr_active.ptr) {
1847 close_bo.handle = bufmgr_gem->userptr_active.handle;
1848 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1849 free(bufmgr_gem->userptr_active.ptr);
1852 "Failed to release test userptr object! (%d) "
1853 "i915 kernel driver may not be sane!\n", errno);
1860 * Adds the target buffer to the validation list and adds the relocation
1861 * to the reloc_buffer's relocation list.
1863 * The relocation entry at the given offset must already contain the
1864 * precomputed relocation value, because the kernel will optimize out
1865 * the relocation entry write when the buffer hasn't moved from the
1866 * last known offset in target_bo.
1869 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1870 drm_intel_bo *target_bo, uint32_t target_offset,
1871 uint32_t read_domains, uint32_t write_domain,
1874 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1875 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1876 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1877 bool fenced_command;
1879 if (bo_gem->has_error)
1882 if (target_bo_gem->has_error) {
1883 bo_gem->has_error = true;
1887 /* We never use HW fences for rendering on 965+ */
1888 if (bufmgr_gem->gen >= 4)
1891 fenced_command = need_fence;
1892 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1895 /* Create a new relocation list if needed */
1896 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1899 /* Check overflow */
1900 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1903 assert(offset <= bo->size - 4);
1904 assert((write_domain & (write_domain - 1)) == 0);
1906 /* An object needing a fence is a tiled buffer, so it won't have
1907 * relocs to other buffers.
1910 assert(target_bo_gem->reloc_count == 0);
1911 target_bo_gem->reloc_tree_fences = 1;
1914 /* Make sure that we're not adding a reloc to something whose size has
1915 * already been accounted for.
1917 assert(!bo_gem->used_as_reloc_target);
1918 if (target_bo_gem != bo_gem) {
1919 target_bo_gem->used_as_reloc_target = true;
1920 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1921 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1924 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1925 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1926 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1927 target_bo_gem->gem_handle;
1928 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1929 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1930 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
1932 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1933 if (target_bo != bo)
1934 drm_intel_gem_bo_reference(target_bo);
1936 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1937 DRM_INTEL_RELOC_FENCE;
1939 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1941 bo_gem->reloc_count++;
1947 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1948 drm_intel_bo *target_bo, uint32_t target_offset,
1949 uint32_t read_domains, uint32_t write_domain)
1951 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1953 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1954 read_domains, write_domain,
1955 !bufmgr_gem->fenced_relocs);
1959 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1960 drm_intel_bo *target_bo,
1961 uint32_t target_offset,
1962 uint32_t read_domains, uint32_t write_domain)
1964 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1965 read_domains, write_domain, true);
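/*
 * Usage sketch (illustrative): emitting a relocation from a batch buffer to
 * a render target through the public wrapper, which ends up in
 * do_bo_emit_reloc() above.  "batch", "target" and the offsets are
 * placeholders for the example.
 *
 *	drm_intel_bo_emit_reloc(batch, 4 * i,
 *				target, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */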
1969 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1971 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1973 return bo_gem->reloc_count;
1977 * Removes existing relocation entries in the BO after "start".
1979 * This allows a user to avoid a two-step process for state setup with
1980 * counting up all the buffer objects and doing a
1981 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1982 * relocations for the state setup. Instead, save the state of the
1983 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1984 * state, and then check if it still fits in the aperture.
1986 * Any further drm_intel_bufmgr_check_aperture_space() queries
1987 * involving this buffer in the tree are undefined after this call.
1990 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1992 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1993 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1995 struct timespec time;
1997 clock_gettime(CLOCK_MONOTONIC, &time);
1999 assert(bo_gem->reloc_count >= start);
2001 /* Unreference the cleared target buffers */
2002 pthread_mutex_lock(&bufmgr_gem->lock);
2004 for (i = start; i < bo_gem->reloc_count; i++) {
2005 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2006 if (&target_bo_gem->bo != bo) {
2007 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2008 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2012 bo_gem->reloc_count = start;
2014 pthread_mutex_unlock(&bufmgr_gem->lock);
2019 * Walk the tree of relocations rooted at BO and accumulate the list of
2020 * validations to be performed and update the relocation buffers with
2021 * index values into the validation list.
2024 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2026 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2029 if (bo_gem->relocs == NULL)
2032 for (i = 0; i < bo_gem->reloc_count; i++) {
2033 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2035 if (target_bo == bo)
2038 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2040 /* Continue walking the tree depth-first. */
2041 drm_intel_gem_bo_process_reloc(target_bo);
2043 /* Add the target to the validate list */
2044 drm_intel_add_validate_buffer(target_bo);
2049 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2051 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2054 if (bo_gem->relocs == NULL)
2057 for (i = 0; i < bo_gem->reloc_count; i++) {
2058 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2061 if (target_bo == bo)
2064 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2066 /* Continue walking the tree depth-first. */
2067 drm_intel_gem_bo_process_reloc2(target_bo);
2069 need_fence = (bo_gem->reloc_target_info[i].flags &
2070 DRM_INTEL_RELOC_FENCE);
2072 /* Add the target to the validate list */
2073 drm_intel_add_validate_buffer2(target_bo, need_fence);
2079 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2083 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2084 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2085 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2087 /* Update the buffer offset */
2088 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2089 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
2090 bo_gem->gem_handle, bo_gem->name, bo->offset64,
2091 (unsigned long long)bufmgr_gem->exec_objects[i].
2093 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2094 bo->offset = bufmgr_gem->exec_objects[i].offset;
2100 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2104 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2105 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2106 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2108 /* Update the buffer offset */
2109 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2110 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
2111 bo_gem->gem_handle, bo_gem->name, bo->offset64,
2112 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
2113 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2114 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2120 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
2122 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
2126 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
2128 fwrite(data, 1, size, bufmgr_gem->aub_file);
2132 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
2134 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2135 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2139 data = malloc(bo->size);
2140 drm_intel_bo_get_subdata(bo, offset, size, data);
2142 /* Easy mode: write out bo with no relocations */
2143 if (!bo_gem->reloc_count) {
2144 aub_out_data(bufmgr_gem, data, size);
2149 /* Otherwise, handle the relocations while writing. */
2150 for (i = 0; i < size / 4; i++) {
2152 for (r = 0; r < bo_gem->reloc_count; r++) {
2153 struct drm_i915_gem_relocation_entry *reloc;
2154 drm_intel_reloc_target *info;
2156 reloc = &bo_gem->relocs[r];
2157 info = &bo_gem->reloc_target_info[r];
2159 if (reloc->offset == offset + i * 4) {
2160 drm_intel_bo_gem *target_gem;
2163 target_gem = (drm_intel_bo_gem *)info->bo;
2166 val += target_gem->aub_offset;
2168 aub_out(bufmgr_gem, val);
2173 if (r == bo_gem->reloc_count) {
2174 /* no relocation, just the data */
2175 aub_out(bufmgr_gem, data[i]);
2183 aub_bo_get_address(drm_intel_bo *bo)
2185 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2186 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2188 /* Give the object a graphics address in the AUB file. We
2189 * don't just use the GEM object address because we do AUB
2190 * dumping before execution -- we want to successfully log
2191 * when the hardware might hang, and we might even want to do AUB
2192 * capture for a driver trying to execute on a different
2193 * generation of hardware by disabling the actual kernel exec
2196 bo_gem->aub_offset = bufmgr_gem->aub_offset;
2197 bufmgr_gem->aub_offset += bo->size;
2198 /* XXX: Handle aperture overflow. */
2199 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
2203 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
2204 uint32_t offset, uint32_t size)
2206 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2207 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2210 CMD_AUB_TRACE_HEADER_BLOCK |
2211 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
2213 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
2214 aub_out(bufmgr_gem, subtype);
2215 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2216 aub_out(bufmgr_gem, size);
2217 if (bufmgr_gem->gen >= 8)
2218 aub_out(bufmgr_gem, 0);
2219 aub_write_bo_data(bo, offset, size);
2223 * Break up large objects into multiple writes. Otherwise a 128kB VBO
2224 * would overflow the 16-bit size field in the packet header and
2225 * everything goes badly after that.
2228 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
2229 uint32_t offset, uint32_t size)
2231 uint32_t block_size;
2232 uint32_t sub_offset;
2234 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
2235 block_size = size - sub_offset;
2237 if (block_size > 8 * 4096)
2238 block_size = 8 * 4096;
2240 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
2246 aub_write_bo(drm_intel_bo *bo)
2248 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2249 uint32_t offset = 0;
2252 aub_bo_get_address(bo);
2254 /* Write out each annotated section separately. */
2255 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
2256 drm_intel_aub_annotation *annotation =
2257 &bo_gem->aub_annotations[i];
2258 uint32_t ending_offset = annotation->ending_offset;
2259 if (ending_offset > bo->size)
2260 ending_offset = bo->size;
2261 if (ending_offset > offset) {
2262 aub_write_large_trace_block(bo, annotation->type,
2263 annotation->subtype,
2265 ending_offset - offset);
2266 offset = ending_offset;
2270 /* Write out any remaining unannotated data */
2271 if (offset < bo->size) {
2272 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
2273 offset, bo->size - offset);
2278 * Make a ring buffer on the fly and dump it.
2281 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2282 uint32_t batch_buffer, int ring_flag)
2284 uint32_t ringbuffer[4096];
2285 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2288 if (ring_flag == I915_EXEC_BSD)
2289 ring = AUB_TRACE_TYPE_RING_PRB1;
2290 else if (ring_flag == I915_EXEC_BLT)
2291 ring = AUB_TRACE_TYPE_RING_PRB2;
2293 /* Make a ring buffer to execute our batchbuffer. */
2294 memset(ringbuffer, 0, sizeof(ringbuffer));
2295 if (bufmgr_gem->gen >= 8) {
2296 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
2297 ringbuffer[ring_count++] = batch_buffer;
2298 ringbuffer[ring_count++] = 0;
2300 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2301 ringbuffer[ring_count++] = batch_buffer;
2304 /* Write out the ring. This appears to trigger execution of
2305 * the ring in the simulator.
2308 CMD_AUB_TRACE_HEADER_BLOCK |
2309 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
2311 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2312 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2313 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2314 aub_out(bufmgr_gem, ring_count * 4);
2315 if (bufmgr_gem->gen >= 8)
2316 aub_out(bufmgr_gem, 0);
2318 /* FIXME: Need some flush operations here? */
2319 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2321 /* Update offset pointer */
2322 bufmgr_gem->aub_offset += 4096;
2326 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2327 int x1, int y1, int width, int height,
2328 enum aub_dump_bmp_format format,
2329 int pitch, int offset)
2331 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2332 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2336 case AUB_DUMP_BMP_FORMAT_8BIT:
2339 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2342 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2343 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2347 fprintf(stderr, "Unknown AUB dump format %d\n", format);
2351 if (!bufmgr_gem->aub_file)
2354 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2355 aub_out(bufmgr_gem, (y1 << 16) | x1);
2360 aub_out(bufmgr_gem, (height << 16) | width);
2361 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2363 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2364 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2368 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2370 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2371 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2373 bool batch_buffer_needs_annotations;
2375 if (!bufmgr_gem->aub_file)
2378 /* If the batch buffer is not annotated, annotate it the best we can.
2381 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2382 if (batch_buffer_needs_annotations) {
2383 drm_intel_aub_annotation annotations[2] = {
2384 { AUB_TRACE_TYPE_BATCH, 0, used },
2385 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2387 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2390 /* Write out all buffers to AUB memory */
2391 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2392 aub_write_bo(bufmgr_gem->exec_bos[i]);
2395 /* Remove any annotations we added */
2396 if (batch_buffer_needs_annotations)
2397 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2399 /* Dump ring buffer */
2400 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2402 fflush(bufmgr_gem->aub_file);
2405 * One frame has been dumped. So reset the aub_offset for the next frame.
2407 * FIXME: Can we do this?
2409 bufmgr_gem->aub_offset = 0x10000;
2413 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2414 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2416 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2417 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2418 struct drm_i915_gem_execbuffer execbuf;
2421 if (bo_gem->has_error)
2424 pthread_mutex_lock(&bufmgr_gem->lock);
2425 /* Update indices and set up the validate list. */
2426 drm_intel_gem_bo_process_reloc(bo);
2428 /* Add the batch buffer to the validation list. There are no
2429 * relocations pointing to it.
2431 drm_intel_add_validate_buffer(bo);
2434 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2435 execbuf.buffer_count = bufmgr_gem->exec_count;
2436 execbuf.batch_start_offset = 0;
2437 execbuf.batch_len = used;
2438 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2439 execbuf.num_cliprects = num_cliprects;
2443 ret = drmIoctl(bufmgr_gem->fd,
2444 DRM_IOCTL_I915_GEM_EXECBUFFER,
2448 if (errno == ENOSPC) {
2449 DBG("Execbuffer fails to pin. "
2450 "Estimate: %u. Actual: %u. Available: %u\n",
2451 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2454 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2457 (unsigned int)bufmgr_gem->gtt_size);
2460 drm_intel_update_buffer_offsets(bufmgr_gem);
2462 if (bufmgr_gem->bufmgr.debug)
2463 drm_intel_gem_dump_validation_list(bufmgr_gem);
2465 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2466 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2467 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2469 bo_gem->idle = false;
2471 /* Disconnect the buffer from the validate list */
2472 bo_gem->validate_index = -1;
2473 bufmgr_gem->exec_bos[i] = NULL;
2475 bufmgr_gem->exec_count = 0;
2476 pthread_mutex_unlock(&bufmgr_gem->lock);
2482 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2483 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2486 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2487 struct drm_i915_gem_execbuffer2 execbuf;
2491 switch (flags & 0x7) {
2495 if (!bufmgr_gem->has_blt)
2499 if (!bufmgr_gem->has_bsd)
2502 case I915_EXEC_VEBOX:
2503 if (!bufmgr_gem->has_vebox)
2506 case I915_EXEC_RENDER:
2507 case I915_EXEC_DEFAULT:
2511 pthread_mutex_lock(&bufmgr_gem->lock);
2512 /* Update indices and set up the validate list. */
2513 drm_intel_gem_bo_process_reloc2(bo);
2515 /* Add the batch buffer to the validation list. There are no relocations pointing to it. */
2518 drm_intel_add_validate_buffer2(bo, 0);
2521 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2522 execbuf.buffer_count = bufmgr_gem->exec_count;
2523 execbuf.batch_start_offset = 0;
2524 execbuf.batch_len = used;
2525 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2526 execbuf.num_cliprects = num_cliprects;
2529 execbuf.flags = flags;
2531 i915_execbuffer2_set_context_id(execbuf, 0);
2533 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2536 aub_exec(bo, flags, used);
2538 if (bufmgr_gem->no_exec)
2539 goto skip_execution;
2541 ret = drmIoctl(bufmgr_gem->fd,
2542 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2546 if (ret == -ENOSPC) {
2547 DBG("Execbuffer fails to pin. "
2548 "Estimate: %u. Actual: %u. Available: %u\n",
2549 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2550 bufmgr_gem->exec_count),
2551 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2552 bufmgr_gem->exec_count),
2553 (unsigned int) bufmgr_gem->gtt_size);
2556 drm_intel_update_buffer_offsets2(bufmgr_gem);
2559 if (bufmgr_gem->bufmgr.debug)
2560 drm_intel_gem_dump_validation_list(bufmgr_gem);
2562 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2563 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2564 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2566 bo_gem->idle = false;
2568 /* Disconnect the buffer from the validate list */
2569 bo_gem->validate_index = -1;
2570 bufmgr_gem->exec_bos[i] = NULL;
2572 bufmgr_gem->exec_count = 0;
2573 pthread_mutex_unlock(&bufmgr_gem->lock);
2579 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2580 drm_clip_rect_t *cliprects, int num_cliprects,
2583 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2588 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2589 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2592 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2597 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2598 int used, unsigned int flags)
2600 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
2604 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2606 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2607 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2608 struct drm_i915_gem_pin pin;
2612 pin.handle = bo_gem->gem_handle;
2613 pin.alignment = alignment;
2615 ret = drmIoctl(bufmgr_gem->fd,
2616 DRM_IOCTL_I915_GEM_PIN,
2621 bo->offset64 = pin.offset;
2622 bo->offset = pin.offset;
2627 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2629 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2630 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2631 struct drm_i915_gem_unpin unpin;
2635 unpin.handle = bo_gem->gem_handle;
2637 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2645 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2646 uint32_t tiling_mode,
2649 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2650 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2651 struct drm_i915_gem_set_tiling set_tiling;
2654 if (bo_gem->global_name == 0 &&
2655 tiling_mode == bo_gem->tiling_mode &&
2656 stride == bo_gem->stride)
2659 memset(&set_tiling, 0, sizeof(set_tiling));
2661 /* set_tiling is slightly broken and overwrites the
2662 * input on the error path, so we have to open code
2665 set_tiling.handle = bo_gem->gem_handle;
2666 set_tiling.tiling_mode = tiling_mode;
2667 set_tiling.stride = stride;
2669 ret = ioctl(bufmgr_gem->fd,
2670 DRM_IOCTL_I915_GEM_SET_TILING,
2672 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2676 bo_gem->tiling_mode = set_tiling.tiling_mode;
2677 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2678 bo_gem->stride = set_tiling.stride;
2683 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2686 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2687 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2690 /* Tiling with userptr surfaces is not supported
2691 * on all hardware, so refuse it for the time being.
2693 if (bo_gem->is_userptr)
2696 /* Linear buffers have no stride. By ensuring that we only ever use
2697 * stride 0 with linear buffers, we simplify our code.
2699 if (*tiling_mode == I915_TILING_NONE)
2702 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2704 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2706 *tiling_mode = bo_gem->tiling_mode;
2711 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2712 uint32_t * swizzle_mode)
2714 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2716 *tiling_mode = bo_gem->tiling_mode;
2717 *swizzle_mode = bo_gem->swizzle_mode;
2722 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2724 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2727 drm_intel_bo_gem *bo_gem;
2728 struct drm_i915_gem_get_tiling get_tiling;
2729 drmMMListHead *list;
2731 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2734 * See if the kernel has already returned this buffer to us. Just as
2735 * for named buffers, we must not create two bo's pointing at the same kernel object.
2738 pthread_mutex_lock(&bufmgr_gem->lock);
2739 for (list = bufmgr_gem->named.next;
2740 list != &bufmgr_gem->named;
2741 list = list->next) {
2742 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
2743 if (bo_gem->gem_handle == handle) {
2744 drm_intel_gem_bo_reference(&bo_gem->bo);
2745 pthread_mutex_unlock(&bufmgr_gem->lock);
2751 fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n", ret, errno);
2752 pthread_mutex_unlock(&bufmgr_gem->lock);
2756 bo_gem = calloc(1, sizeof(*bo_gem));
2758 pthread_mutex_unlock(&bufmgr_gem->lock);
2761 /* Determine the size of the bo. The fd-to-handle ioctl really should
2762 * return the size, but it doesn't. If we have kernel 3.12 or
2763 * later, we can lseek on the prime fd to get the size. Older
2764 * kernels will just fail, in which case we fall back to the
2765 * provided (estimated or guessed) size. */
2766 ret = lseek(prime_fd, 0, SEEK_END);
2768 bo_gem->bo.size = ret;
2770 bo_gem->bo.size = size;
2772 bo_gem->bo.handle = handle;
2773 bo_gem->bo.bufmgr = bufmgr;
2775 bo_gem->gem_handle = handle;
2777 atomic_set(&bo_gem->refcount, 1);
2779 bo_gem->name = "prime";
2780 bo_gem->validate_index = -1;
2781 bo_gem->reloc_tree_fences = 0;
2782 bo_gem->used_as_reloc_target = false;
2783 bo_gem->has_error = false;
2784 bo_gem->reusable = false;
2786 DRMINITLISTHEAD(&bo_gem->vma_list);
2787 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2788 pthread_mutex_unlock(&bufmgr_gem->lock);
2790 memclear(get_tiling);
2791 get_tiling.handle = bo_gem->gem_handle;
2792 ret = drmIoctl(bufmgr_gem->fd,
2793 DRM_IOCTL_I915_GEM_GET_TILING,
2796 drm_intel_gem_bo_unreference(&bo_gem->bo);
2799 bo_gem->tiling_mode = get_tiling.tiling_mode;
2800 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2801 /* XXX stride is unknown */
2802 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2808 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2810 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2811 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2813 pthread_mutex_lock(&bufmgr_gem->lock);
2814 if (DRMLISTEMPTY(&bo_gem->name_list))
2815 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2816 pthread_mutex_unlock(&bufmgr_gem->lock);
2818 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2819 DRM_CLOEXEC, prime_fd) != 0)
2822 bo_gem->reusable = false;
2828 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2830 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2831 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2834 if (!bo_gem->global_name) {
2835 struct drm_gem_flink flink;
2838 flink.handle = bo_gem->gem_handle;
2840 pthread_mutex_lock(&bufmgr_gem->lock);
2842 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2844 pthread_mutex_unlock(&bufmgr_gem->lock);
2848 bo_gem->global_name = flink.name;
2849 bo_gem->reusable = false;
2851 if (DRMLISTEMPTY(&bo_gem->name_list))
2852 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2853 pthread_mutex_unlock(&bufmgr_gem->lock);
2856 *name = bo_gem->global_name;
2861 * Enables unlimited caching of buffer objects for reuse.
2863 * This is potentially very memory expensive, as the cache at each bucket
2864 * size is only bounded by how many buffers of that size we've managed to have
2865 * in flight at once.
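/* Illustrative usage, typically done right after creating the buffer
 * manager (16 * 1024 is just an example batch size):
 *
 *   drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *   drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */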
2868 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2870 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2872 bufmgr_gem->bo_reuse = true;
2876 * Enable use of fenced reloc type.
2878 * New code should enable this to avoid unnecessary fence register
2879 * allocation. If this option is not enabled, all relocs will have a fence
2880 * register allocated.
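/* Illustrative usage, alongside buffer reuse:
 *
 *   drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *   drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 *
 * Note that the flag is only honoured when execbuffer2 is in use (see
 * the bo_exec check below).
 */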
2883 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2885 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2887 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2888 bufmgr_gem->fenced_relocs = true;
2892 * Return the additional aperture space required by the tree of buffer objects rooted at bo.
2896 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2898 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2902 if (bo == NULL || bo_gem->included_in_check_aperture)
2906 bo_gem->included_in_check_aperture = true;
2908 for (i = 0; i < bo_gem->reloc_count; i++)
2910 drm_intel_gem_bo_get_aperture_space(bo_gem->
2911 reloc_target_info[i].bo);
2917 * Count the number of buffers in this list that need a fence reg
2919 * If the count is greater than the number of available regs, we'll have
2920 * to ask the caller to resubmit a batch with fewer tiled buffers.
2922 * This function over-counts if the same buffer is used multiple times.
2925 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2928 unsigned int total = 0;
2930 for (i = 0; i < count; i++) {
2931 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2936 total += bo_gem->reloc_tree_fences;
2942 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2943 * for the next drm_intel_bufmgr_check_aperture_space() call.
2946 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2948 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2951 if (bo == NULL || !bo_gem->included_in_check_aperture)
2954 bo_gem->included_in_check_aperture = false;
2956 for (i = 0; i < bo_gem->reloc_count; i++)
2957 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2958 reloc_target_info[i].bo);
2962 * Return a conservative estimate for the amount of aperture required
2963 * for a collection of buffers. This may double-count some buffers.
2966 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2969 unsigned int total = 0;
2971 for (i = 0; i < count; i++) {
2972 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2974 total += bo_gem->reloc_tree_size;
2980 * Return the amount of aperture needed for a collection of buffers.
2981 * This avoids double counting any buffers, at the cost of looking
2982 * at every buffer in the set.
2985 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2988 unsigned int total = 0;
2990 for (i = 0; i < count; i++) {
2991 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2992 /* For the first buffer object in the array, we get an
2993 * accurate count back for its reloc_tree size (since nothing
2994 * had been flagged as being counted yet). We can save that
2995 * value out as a more conservative reloc_tree_size that
2996 * avoids double-counting target buffers. Since the first
2997 * buffer happens to usually be the batch buffer in our
2998 * callers, this can pull us back from doing the tree
2999 * walk on every new batch emit.
3002 drm_intel_bo_gem *bo_gem =
3003 (drm_intel_bo_gem *) bo_array[i];
3004 bo_gem->reloc_tree_size = total;
3008 for (i = 0; i < count; i++)
3009 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
3014 * Return -1 if the batchbuffer should be flushed before attempting to
3015 * emit rendering referencing the buffers pointed to by bo_array.
3017 * This is required because if we try to emit a batchbuffer with relocations
3018 * to a tree of buffers that won't simultaneously fit in the aperture,
3019 * the rendering will return an error at a point where the software is not
3020 * prepared to recover from it.
3022 * However, we also want to emit the batchbuffer significantly before we reach
3023 * the limit, as a series of batchbuffers each of which references buffers
3024 * covering almost all of the aperture means that at each emit we end up
3025 * waiting to evict a buffer from the last rendering, and we get synchronous
3026 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
3027 * get better parallelism.
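/* A minimal usage sketch (flush_batch() is a hypothetical driver-side
 * helper): before emitting state that references a new buffer, check
 * whether the whole set still fits and flush first if it does not:
 *
 *   drm_intel_bo *check[] = { batch_bo, new_target_bo };
 *   if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0)
 *       flush_batch();
 */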
3030 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
3032 drm_intel_bufmgr_gem *bufmgr_gem =
3033 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
3034 unsigned int total = 0;
3035 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
3038 /* Check for fence reg constraints if necessary */
3039 if (bufmgr_gem->available_fences) {
3040 total_fences = drm_intel_gem_total_fences(bo_array, count);
3041 if (total_fences > bufmgr_gem->available_fences)
3045 total = drm_intel_gem_estimate_batch_space(bo_array, count);
3047 if (total > threshold)
3048 total = drm_intel_gem_compute_batch_space(bo_array, count);
3050 if (total > threshold) {
3051 DBG("check_space: overflowed available aperture, "
3053 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
3056 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
3057 (int)bufmgr_gem->gtt_size / 1024);
3063 * Disable buffer reuse for objects which are shared with the kernel
3064 * as scanout buffers
3067 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
3069 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3071 bo_gem->reusable = false;
3076 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3078 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3080 return bo_gem->reusable;
3084 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3086 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3089 for (i = 0; i < bo_gem->reloc_count; i++) {
3090 if (bo_gem->reloc_target_info[i].bo == target_bo)
3092 if (bo == bo_gem->reloc_target_info[i].bo)
3094 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
3102 /** Return true if target_bo is referenced by bo's relocation tree. */
3104 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3106 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3108 if (bo == NULL || target_bo == NULL)
3110 if (target_bo_gem->used_as_reloc_target)
3111 return _drm_intel_gem_bo_references(bo, target_bo);
3116 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3118 unsigned int i = bufmgr_gem->num_buckets;
3120 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3122 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3123 bufmgr_gem->cache_bucket[i].size = size;
3124 bufmgr_gem->num_buckets++;
3128 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3130 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3132 /* OK, so power-of-two buckets were too wasteful of memory.
3133 * Give 3 other sizes between each power of two, to hopefully
3134 * cover things accurately enough. (The alternative is
3135 * probably to just go for exact matching of sizes, and assume
3136 * that for things like composited window resize the tiled
3137 * width/height alignment and rounding of sizes to pages will
3138 * get us useful cache hit rates anyway.)
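/* The calls below yield buckets of 4, 8 and 12 KiB, then four buckets
 * per power of two (16/20/24/28 KiB, 32/40/48/56 KiB, ...) with the
 * last group starting at 64 MiB: 55 buckets in total.
 */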
3140 add_bucket(bufmgr_gem, 4096);
3141 add_bucket(bufmgr_gem, 4096 * 2);
3142 add_bucket(bufmgr_gem, 4096 * 3);
3144 /* Initialize the linked lists for BO reuse cache. */
3145 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3146 add_bucket(bufmgr_gem, size);
3148 add_bucket(bufmgr_gem, size + size * 1 / 4);
3149 add_bucket(bufmgr_gem, size + size * 2 / 4);
3150 add_bucket(bufmgr_gem, size + size * 3 / 4);
3155 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3157 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3159 bufmgr_gem->vma_max = limit;
3161 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3165 * Get the PCI ID for the device. This can be overridden by setting the
3166 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
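/* For example (illustrative device ID):
 *
 *   INTEL_DEVID_OVERRIDE=0x0162 ./some_test
 *
 * Overriding the ID also sets no_exec, so batchbuffers are never
 * actually submitted to the kernel. The override is ignored for
 * setuid binaries.
 */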
3169 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3171 char *devid_override;
3174 drm_i915_getparam_t gp;
3176 if (geteuid() == getuid()) {
3177 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3178 if (devid_override) {
3179 bufmgr_gem->no_exec = true;
3180 return strtol(devid_override, NULL, 0);
3185 gp.param = I915_PARAM_CHIPSET_ID;
3187 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3189 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3190 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3196 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3198 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3200 return bufmgr_gem->pci_device;
3204 * Sets the AUB filename.
3206 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3207 * for it to have any effect.
3210 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3211 const char *filename)
3213 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3215 free(bufmgr_gem->aub_filename);
3217 bufmgr_gem->aub_filename = strdup(filename);
3221 * Sets up AUB dumping.
3223 * This is a trace file format that can be used with the simulator.
3224 * Packets are emitted in a format somewhat like GPU command packets.
3225 * You can set up a GTT and upload your objects into the referenced
3226 * space, then send off batchbuffers and get BMPs out the other end.
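/* Illustrative usage; the filename must be set (if at all) before
 * dumping is enabled:
 *
 *   drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "trace.aub");
 *   drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *
 * Without an explicit filename the trace is written to "intel.aub".
 */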
3229 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3231 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3232 int entry = 0x200003;
3234 int gtt_size = 0x10000;
3235 const char *filename;
3238 if (bufmgr_gem->aub_file) {
3239 fclose(bufmgr_gem->aub_file);
3240 bufmgr_gem->aub_file = NULL;
3245 if (geteuid() != getuid())
3248 if (bufmgr_gem->aub_filename)
3249 filename = bufmgr_gem->aub_filename;
3251 filename = "intel.aub";
3252 bufmgr_gem->aub_file = fopen(filename, "w+");
3253 if (!bufmgr_gem->aub_file)
3256 /* Start allocating objects from just after the GTT. */
3257 bufmgr_gem->aub_offset = gtt_size;
3259 /* Start with a (required) version packet. */
3260 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
3262 (4 << AUB_HEADER_MAJOR_SHIFT) |
3263 (0 << AUB_HEADER_MINOR_SHIFT));
3264 for (i = 0; i < 8; i++) {
3265 aub_out(bufmgr_gem, 0); /* app name */
3267 aub_out(bufmgr_gem, 0); /* timestamp */
3268 aub_out(bufmgr_gem, 0); /* timestamp */
3269 aub_out(bufmgr_gem, 0); /* comment len */
3271 /* Set up the GTT. The max we can handle is 256M */
3272 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
3273 /* Need to use GTT_ENTRY type for recent emulator */
3274 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_GTT_ENTRY | 0 | AUB_TRACE_OP_DATA_WRITE);
3275 aub_out(bufmgr_gem, 0); /* subtype */
3276 aub_out(bufmgr_gem, 0); /* offset */
3277 aub_out(bufmgr_gem, gtt_size); /* size */
3278 if (bufmgr_gem->gen >= 8)
3279 aub_out(bufmgr_gem, 0);
3280 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
3281 aub_out(bufmgr_gem, entry);
3286 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3288 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3289 struct drm_i915_gem_context_create create;
3290 drm_intel_context *context = NULL;
3293 context = calloc(1, sizeof(*context));
3298 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3300 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3306 context->ctx_id = create.ctx_id;
3307 context->bufmgr = bufmgr;
3313 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3315 drm_intel_bufmgr_gem *bufmgr_gem;
3316 struct drm_i915_gem_context_destroy destroy;
3324 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3325 destroy.ctx_id = ctx->ctx_id;
3326 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3329 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
3336 drm_intel_get_reset_stats(drm_intel_context *ctx,
3337 uint32_t *reset_count,
3341 drm_intel_bufmgr_gem *bufmgr_gem;
3342 struct drm_i915_reset_stats stats;
3350 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3351 stats.ctx_id = ctx->ctx_id;
3352 ret = drmIoctl(bufmgr_gem->fd,
3353 DRM_IOCTL_I915_GET_RESET_STATS,
3356 if (reset_count != NULL)
3357 *reset_count = stats.reset_count;
3360 *active = stats.batch_active;
3362 if (pending != NULL)
3363 *pending = stats.batch_pending;
3370 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3374 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3375 struct drm_i915_reg_read reg_read;
3379 reg_read.offset = offset;
3381 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3383 *result = reg_read.val;
3388 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3390 drm_i915_getparam_t gp;
3394 gp.value = (int*)subslice_total;
3395 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3396 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3404 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3406 drm_i915_getparam_t gp;
3410 gp.value = (int*)eu_total;
3411 gp.param = I915_PARAM_EU_TOTAL;
3412 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3420 * Annotate the given bo for use in aub dumping.
3422 * \param annotations is an array of drm_intel_aub_annotation objects
3423 * describing the type of data in various sections of the bo. Each
3424 * element of the array specifies the type and subtype of a section of
3425 * the bo, and the past-the-end offset of that section. The elements
3426 * of \c annotations must be sorted so that ending_offset is increasing.
3429 * \param count is the number of elements in the \c annotations array.
3430 * If \c count is zero, then \c annotations will not be dereferenced.
3432 * Annotations are copied into a private data structure, so the caller may
3433 * re-use the memory pointed to by \c annotations after the call returns.
3436 * Annotations are stored for the lifetime of the bo; to reset to the
3437 * default state (no annotations), call this function with a \c count of zero.
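/* A minimal sketch (used_bytes stands for the number of bytes of valid
 * batch data):
 *
 *   drm_intel_aub_annotation notes[2] = {
 *       { AUB_TRACE_TYPE_BATCH,  0, used_bytes },
 *       { AUB_TRACE_TYPE_NOTYPE, 0, bo->size   },
 *   };
 *   drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);
 *
 * This mirrors what aub_exec() does above for unannotated batch buffers.
 */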
3441 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3442 drm_intel_aub_annotation *annotations,
3445 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3446 unsigned size = sizeof(*annotations) * count;
3447 drm_intel_aub_annotation *new_annotations =
3448 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
3449 if (new_annotations == NULL) {
3450 free(bo_gem->aub_annotations);
3451 bo_gem->aub_annotations = NULL;
3452 bo_gem->aub_annotation_count = 0;
3455 memcpy(new_annotations, annotations, size);
3456 bo_gem->aub_annotations = new_annotations;
3457 bo_gem->aub_annotation_count = count;
3460 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3461 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3463 static drm_intel_bufmgr_gem *
3464 drm_intel_bufmgr_gem_find(int fd)
3466 drm_intel_bufmgr_gem *bufmgr_gem;
3468 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3469 if (bufmgr_gem->fd == fd) {
3470 atomic_inc(&bufmgr_gem->refcount);
3479 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3481 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3483 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3484 pthread_mutex_lock(&bufmgr_list_mutex);
3486 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3487 DRMLISTDEL(&bufmgr_gem->managers);
3488 drm_intel_bufmgr_gem_destroy(bufmgr);
3491 pthread_mutex_unlock(&bufmgr_list_mutex);
3496 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3497 * and manage buffer objects.
3499 * \param fd File descriptor of the opened DRM device.
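/* Illustrative setup (device path and batch size are examples):
 *
 *   int fd = open("/dev/dri/card0", O_RDWR);
 *   drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *   ...
 *   drm_intel_bufmgr_destroy(bufmgr);
 */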
3502 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3504 drm_intel_bufmgr_gem *bufmgr_gem;
3505 struct drm_i915_gem_get_aperture aperture;
3506 drm_i915_getparam_t gp;
3510 pthread_mutex_lock(&bufmgr_list_mutex);
3512 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3516 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3517 if (bufmgr_gem == NULL)
3520 bufmgr_gem->fd = fd;
3521 atomic_set(&bufmgr_gem->refcount, 1);
3523 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3530 ret = drmIoctl(bufmgr_gem->fd,
3531 DRM_IOCTL_I915_GEM_GET_APERTURE,
3535 bufmgr_gem->gtt_size = aperture.aper_available_size;
3537 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3539 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3540 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3541 "May lead to reduced performance or incorrect "
3543 (int)bufmgr_gem->gtt_size / 1024);
3546 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3548 if (IS_GEN2(bufmgr_gem->pci_device))
3549 bufmgr_gem->gen = 2;
3550 else if (IS_GEN3(bufmgr_gem->pci_device))
3551 bufmgr_gem->gen = 3;
3552 else if (IS_GEN4(bufmgr_gem->pci_device))
3553 bufmgr_gem->gen = 4;
3554 else if (IS_GEN5(bufmgr_gem->pci_device))
3555 bufmgr_gem->gen = 5;
3556 else if (IS_GEN6(bufmgr_gem->pci_device))
3557 bufmgr_gem->gen = 6;
3558 else if (IS_GEN7(bufmgr_gem->pci_device))
3559 bufmgr_gem->gen = 7;
3560 else if (IS_GEN8(bufmgr_gem->pci_device))
3561 bufmgr_gem->gen = 8;
3562 else if (IS_GEN9(bufmgr_gem->pci_device))
3563 bufmgr_gem->gen = 9;
3570 if (IS_GEN3(bufmgr_gem->pci_device) &&
3571 bufmgr_gem->gtt_size > 256*1024*1024) {
3572 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3573 * be used for tiled blits. To simplify the accounting, just
3574 * subtract the unmappable part (fixed to 256MB on all known
3575 * gen3 devices) if the kernel advertises it. */
3576 bufmgr_gem->gtt_size -= 256*1024*1024;
3582 gp.param = I915_PARAM_HAS_EXECBUF2;
3583 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3587 gp.param = I915_PARAM_HAS_BSD;
3588 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3589 bufmgr_gem->has_bsd = ret == 0;
3591 gp.param = I915_PARAM_HAS_BLT;
3592 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3593 bufmgr_gem->has_blt = ret == 0;
3595 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3596 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3597 bufmgr_gem->has_relaxed_fencing = ret == 0;
3599 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3601 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3602 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3603 bufmgr_gem->has_wait_timeout = ret == 0;
3605 gp.param = I915_PARAM_HAS_LLC;
3606 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3608 /* Kernel does not support the HAS_LLC query; fall back to GPU
3609 * generation detection and assume that we have LLC on GEN6/7.
3611 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3612 IS_GEN7(bufmgr_gem->pci_device));
3614 bufmgr_gem->has_llc = *gp.value;
3616 gp.param = I915_PARAM_HAS_VEBOX;
3617 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3618 bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);
3620 if (bufmgr_gem->gen < 4) {
3621 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3622 gp.value = &bufmgr_gem->available_fences;
3623 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3625 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3627 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3629 bufmgr_gem->available_fences = 0;
3631 /* XXX The kernel reports the total number of fences,
3632 * including any that may be pinned.
3634 * We presume that there will be at least one pinned
3635 * fence for the scanout buffer, but there may be more
3636 * than one scanout and the user may be manually
3637 * pinning buffers. Let's move to execbuffer2 and
3638 * thereby forget the insanity of using fences...
3640 bufmgr_gem->available_fences -= 2;
3641 if (bufmgr_gem->available_fences < 0)
3642 bufmgr_gem->available_fences = 0;
3646 /* Let's go with one relocation for every 2 dwords (but round down a bit
3647 * since a power of two would mean an extra page allocation for the reloc buffer).
3650 * Every 4 was too few for the blender benchmark.
3652 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3654 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3655 bufmgr_gem->bufmgr.bo_alloc_for_render =
3656 drm_intel_gem_bo_alloc_for_render;
3657 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3658 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3659 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3660 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3661 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3662 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3663 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3664 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3665 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3666 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3667 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3668 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3669 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3670 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3671 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3672 /* Use the new one if available */
3674 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3675 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3677 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3678 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3679 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3680 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3681 bufmgr_gem->bufmgr.debug = 0;
3682 bufmgr_gem->bufmgr.check_aperture_space =
3683 drm_intel_gem_check_aperture_space;
3684 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3685 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3686 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3687 drm_intel_gem_get_pipe_from_crtc_id;
3688 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3690 DRMINITLISTHEAD(&bufmgr_gem->named);
3691 init_cache_buckets(bufmgr_gem);
3693 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3694 bufmgr_gem->vma_max = -1; /* unlimited by default */
3696 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3699 pthread_mutex_unlock(&bufmgr_list_mutex);
3701 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;