1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
61 #include "intel_aub.h"
74 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
76 #define DBG(...) do { \
77 if (bufmgr_gem->bufmgr.debug) \
78 fprintf(stderr, __VA_ARGS__); \
81 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
83 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
85 struct drm_intel_gem_bo_bucket {
90 typedef struct _drm_intel_bufmgr_gem {
91 drm_intel_bufmgr bufmgr;
99 struct drm_i915_gem_exec_object *exec_objects;
100 struct drm_i915_gem_exec_object2 *exec2_objects;
101 drm_intel_bo **exec_bos;
105 /** Array of lists of cached gem objects of power-of-two sizes */
106 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
111 drmMMListHead vma_cache;
112 int vma_count, vma_open, vma_max;
115 int available_fences;
118 unsigned int has_bsd : 1;
119 unsigned int has_blt : 1;
120 unsigned int has_relaxed_fencing : 1;
121 unsigned int has_llc : 1;
122 unsigned int bo_reuse : 1;
123 unsigned int no_exec : 1;
128 } drm_intel_bufmgr_gem;
130 #define DRM_INTEL_RELOC_FENCE (1<<0)
132 typedef struct _drm_intel_reloc_target_info {
135 } drm_intel_reloc_target;
137 struct _drm_intel_bo_gem {
	 * Kernel-assigned global name for this object
147 unsigned int global_name;
148 drmMMListHead name_list;
151 * Index of the buffer within the validation list while preparing a
152 * batchbuffer execution.
157 * Current tiling mode
159 uint32_t tiling_mode;
160 uint32_t swizzle_mode;
161 unsigned long stride;
165 /** Array passed to the DRM containing relocation information. */
166 struct drm_i915_gem_relocation_entry *relocs;
168 * Array of info structs corresponding to relocs[i].target_handle etc
170 drm_intel_reloc_target *reloc_target_info;
171 /** Number of entries in relocs */
173 /** Mapped address for the buffer, saved across map/unmap cycles */
175 /** GTT virtual address for the buffer, saved across map/unmap cycles */
178 drmMMListHead vma_list;
184 * Boolean of whether this BO and its children have been included in
185 * the current drm_intel_bufmgr_check_aperture_space() total.
187 bool included_in_check_aperture;
190 * Boolean of whether this buffer has been used as a relocation
191 * target and had its size accounted for, and thus can't have any
192 * further relocations added to it.
194 bool used_as_reloc_target;
197 * Boolean of whether we have encountered an error whilst building the relocation tree.
202 * Boolean of whether this buffer can be re-used
207 * Size in bytes of this buffer and its relocation descendents.
209 * Used to avoid costly tree walking in
210 * drm_intel_bufmgr_check_aperture in the common case.
215 * Number of potential fence registers required by this buffer and its
218 int reloc_tree_fences;
	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
221 bool mapped_cpu_write;
225 drm_intel_aub_annotation *aub_annotations;
226 unsigned aub_annotation_count;
230 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
233 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
236 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
237 uint32_t * swizzle_mode);
240 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
241 uint32_t tiling_mode,
244 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
247 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
249 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
252 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
253 uint32_t *tiling_mode)
255 unsigned long min_size, max_size;
258 if (*tiling_mode == I915_TILING_NONE)
	/* 965+ just needs multiples of the page size for tiling */
262 if (bufmgr_gem->gen >= 4)
263 return ROUND_UP_TO(size, 4096);
265 /* Older chips need powers of two, of at least 512k or 1M */
266 if (bufmgr_gem->gen == 3) {
267 min_size = 1024*1024;
268 max_size = 128*1024*1024;
271 max_size = 64*1024*1024;
274 if (size > max_size) {
275 *tiling_mode = I915_TILING_NONE;
279 /* Do we need to allocate every page for the fence? */
280 if (bufmgr_gem->has_relaxed_fencing)
281 return ROUND_UP_TO(size, 4096);
283 for (i = min_size; i < size; i <<= 1)
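
/* Illustrative sketch (not part of the library): the pre-965 rounding
 * above walks min_size up by powers of two until it covers the request,
 * so e.g. a 1.5 MiB tiled request on gen3 (min_size = 1 MiB) rounds up
 * to 2 MiB. A standalone version of that rule, under the same
 * assumptions:
 */
static unsigned long
example_pow2_round(unsigned long size, unsigned long min_size)
{
	unsigned long i;

	for (i = min_size; i < size; i <<= 1)
		;
	return i;
}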
290 * Round a given pitch up to the minimum required for X tiling on a
291 * given chip. We use 512 as the minimum to allow for a later tiling
295 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
296 unsigned long pitch, uint32_t *tiling_mode)
298 unsigned long tile_width;
301 /* If untiled, then just align it so that we can do rendering
302 * to it with the 3D engine.
304 if (*tiling_mode == I915_TILING_NONE)
305 return ALIGN(pitch, 64);
307 if (*tiling_mode == I915_TILING_X
308 || (IS_915(bufmgr_gem->pci_device)
309 && *tiling_mode == I915_TILING_Y))
314 /* 965 is flexible */
315 if (bufmgr_gem->gen >= 4)
316 return ROUND_UP_TO(pitch, tile_width);
318 /* The older hardware has a maximum pitch of 8192 with tiled
319 * surfaces, so fallback to untiled if it's too large.
322 *tiling_mode = I915_TILING_NONE;
323 return ALIGN(pitch, 64);
	/* Pre-965 needs a power-of-two tile width */
327 for (i = tile_width; i < pitch; i <<= 1)
333 static struct drm_intel_gem_bo_bucket *
334 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
339 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
340 struct drm_intel_gem_bo_bucket *bucket =
341 &bufmgr_gem->cache_bucket[i];
342 if (bucket->size >= size) {
351 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
355 for (i = 0; i < bufmgr_gem->exec_count; i++) {
356 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
357 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
359 if (bo_gem->relocs == NULL) {
360 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
365 for (j = 0; j < bo_gem->reloc_count; j++) {
366 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
367 drm_intel_bo_gem *target_gem =
368 (drm_intel_bo_gem *) target_bo;
370 DBG("%2d: %d (%s)@0x%08llx -> "
371 "%d (%s)@0x%08lx + 0x%08x\n",
373 bo_gem->gem_handle, bo_gem->name,
374 (unsigned long long)bo_gem->relocs[j].offset,
375 target_gem->gem_handle,
378 bo_gem->relocs[j].delta);
384 drm_intel_gem_bo_reference(drm_intel_bo *bo)
386 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
388 atomic_inc(&bo_gem->refcount);
392 * Adds the given buffer to the list of buffers to be validated (moved into the
393 * appropriate memory type) with the next batch submission.
395 * If a buffer is validated multiple times in a batch submission, it ends up
396 * with the intersection of the memory type flags and the union of the
400 drm_intel_add_validate_buffer(drm_intel_bo *bo)
402 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
403 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
406 if (bo_gem->validate_index != -1)
409 /* Extend the array of validation entries as necessary. */
410 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
411 int new_size = bufmgr_gem->exec_size * 2;
416 bufmgr_gem->exec_objects =
417 realloc(bufmgr_gem->exec_objects,
418 sizeof(*bufmgr_gem->exec_objects) * new_size);
419 bufmgr_gem->exec_bos =
420 realloc(bufmgr_gem->exec_bos,
421 sizeof(*bufmgr_gem->exec_bos) * new_size);
422 bufmgr_gem->exec_size = new_size;
425 index = bufmgr_gem->exec_count;
426 bo_gem->validate_index = index;
427 /* Fill in array entry */
428 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
429 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
430 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
431 bufmgr_gem->exec_objects[index].alignment = 0;
432 bufmgr_gem->exec_objects[index].offset = 0;
433 bufmgr_gem->exec_bos[index] = bo;
434 bufmgr_gem->exec_count++;
438 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
440 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
441 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
444 if (bo_gem->validate_index != -1) {
446 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
447 EXEC_OBJECT_NEEDS_FENCE;
451 /* Extend the array of validation entries as necessary. */
452 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
453 int new_size = bufmgr_gem->exec_size * 2;
458 bufmgr_gem->exec2_objects =
459 realloc(bufmgr_gem->exec2_objects,
460 sizeof(*bufmgr_gem->exec2_objects) * new_size);
461 bufmgr_gem->exec_bos =
462 realloc(bufmgr_gem->exec_bos,
463 sizeof(*bufmgr_gem->exec_bos) * new_size);
464 bufmgr_gem->exec_size = new_size;
467 index = bufmgr_gem->exec_count;
468 bo_gem->validate_index = index;
469 /* Fill in array entry */
470 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
471 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
472 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
473 bufmgr_gem->exec2_objects[index].alignment = 0;
474 bufmgr_gem->exec2_objects[index].offset = 0;
475 bufmgr_gem->exec_bos[index] = bo;
476 bufmgr_gem->exec2_objects[index].flags = 0;
477 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
478 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
480 bufmgr_gem->exec2_objects[index].flags |=
481 EXEC_OBJECT_NEEDS_FENCE;
483 bufmgr_gem->exec_count++;
486 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
490 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
491 drm_intel_bo_gem *bo_gem)
495 assert(!bo_gem->used_as_reloc_target);
	 * The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
499 * This means that in the worst possible case we will need a hole
500 * twice as large as the object in order for it to fit into the
501 * aperture. Optimal packing is for wimps.
503 size = bo_gem->bo.size;
504 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
507 if (bufmgr_gem->has_relaxed_fencing) {
508 if (bufmgr_gem->gen == 3)
509 min_size = 1024*1024;
513 while (min_size < size)
518 /* Account for worst-case alignment. */
522 bo_gem->reloc_tree_size = size;
526 drm_intel_setup_reloc_list(drm_intel_bo *bo)
528 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
529 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
530 unsigned int max_relocs = bufmgr_gem->max_relocs;
532 if (bo->size / 4 < max_relocs)
533 max_relocs = bo->size / 4;
535 bo_gem->relocs = malloc(max_relocs *
536 sizeof(struct drm_i915_gem_relocation_entry));
537 bo_gem->reloc_target_info = malloc(max_relocs *
538 sizeof(drm_intel_reloc_target));
539 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
540 bo_gem->has_error = true;
542 free (bo_gem->relocs);
543 bo_gem->relocs = NULL;
545 free (bo_gem->reloc_target_info);
546 bo_gem->reloc_target_info = NULL;
555 drm_intel_gem_bo_busy(drm_intel_bo *bo)
557 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
558 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
559 struct drm_i915_gem_busy busy;
563 busy.handle = bo_gem->gem_handle;
565 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
567 return (ret == 0 && busy.busy);
571 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
572 drm_intel_bo_gem *bo_gem, int state)
574 struct drm_i915_gem_madvise madv;
577 madv.handle = bo_gem->gem_handle;
580 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
582 return madv.retained;
586 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
588 return drm_intel_gem_bo_madvise_internal
589 ((drm_intel_bufmgr_gem *) bo->bufmgr,
590 (drm_intel_bo_gem *) bo,
594 /* drop the oldest entries that have been purged by the kernel */
596 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
597 struct drm_intel_gem_bo_bucket *bucket)
599 while (!DRMLISTEMPTY(&bucket->head)) {
600 drm_intel_bo_gem *bo_gem;
602 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
603 bucket->head.next, head);
604 if (drm_intel_gem_bo_madvise_internal
605 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
608 DRMLISTDEL(&bo_gem->head);
609 drm_intel_gem_bo_free(&bo_gem->bo);
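
/* Usage sketch (not part of the library): clients can apply the same
 * purgeability hint through the public wrapper, assuming
 * drm_intel_bo_madvise() from intel_bufmgr.h.
 */
static int
example_mark_purgeable(drm_intel_bo *bo)
{
	/* Tell the kernel it may reclaim the pages under memory
	 * pressure; the return value reports whether the contents
	 * were retained.
	 */
	return drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
}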
613 static drm_intel_bo *
614 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
618 uint32_t tiling_mode,
619 unsigned long stride)
621 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
622 drm_intel_bo_gem *bo_gem;
623 unsigned int page_size = getpagesize();
625 struct drm_intel_gem_bo_bucket *bucket;
626 bool alloc_from_cache;
627 unsigned long bo_size;
628 bool for_render = false;
630 if (flags & BO_ALLOC_FOR_RENDER)
	/* Round the allocated size up to a power-of-two number of pages. */
634 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
636 /* If we don't have caching at this size, don't actually round the
639 if (bucket == NULL) {
641 if (bo_size < page_size)
644 bo_size = bucket->size;
647 pthread_mutex_lock(&bufmgr_gem->lock);
648 /* Get a buffer out of the cache if available */
650 alloc_from_cache = false;
651 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
653 /* Allocate new render-target BOs from the tail (MRU)
654 * of the list, as it will likely be hot in the GPU
655 * cache and in the aperture for us.
657 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
658 bucket->head.prev, head);
659 DRMLISTDEL(&bo_gem->head);
660 alloc_from_cache = true;
662 /* For non-render-target BOs (where we're probably
663 * going to map it first thing in order to fill it
664 * with data), check if the last BO in the cache is
665 * unbusy, and only reuse in that case. Otherwise,
666 * allocating a new buffer is probably faster than
667 * waiting for the GPU to finish.
669 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
670 bucket->head.next, head);
671 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
672 alloc_from_cache = true;
673 DRMLISTDEL(&bo_gem->head);
677 if (alloc_from_cache) {
678 if (!drm_intel_gem_bo_madvise_internal
679 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
680 drm_intel_gem_bo_free(&bo_gem->bo);
681 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
686 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
689 drm_intel_gem_bo_free(&bo_gem->bo);
694 pthread_mutex_unlock(&bufmgr_gem->lock);
696 if (!alloc_from_cache) {
697 struct drm_i915_gem_create create;
699 bo_gem = calloc(1, sizeof(*bo_gem));
703 bo_gem->bo.size = bo_size;
706 create.size = bo_size;
708 ret = drmIoctl(bufmgr_gem->fd,
709 DRM_IOCTL_I915_GEM_CREATE,
711 bo_gem->gem_handle = create.handle;
712 bo_gem->bo.handle = bo_gem->gem_handle;
717 bo_gem->bo.bufmgr = bufmgr;
719 bo_gem->tiling_mode = I915_TILING_NONE;
720 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
723 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
726 drm_intel_gem_bo_free(&bo_gem->bo);
730 DRMINITLISTHEAD(&bo_gem->name_list);
731 DRMINITLISTHEAD(&bo_gem->vma_list);
735 atomic_set(&bo_gem->refcount, 1);
736 bo_gem->validate_index = -1;
737 bo_gem->reloc_tree_fences = 0;
738 bo_gem->used_as_reloc_target = false;
739 bo_gem->has_error = false;
740 bo_gem->reusable = true;
741 bo_gem->aub_annotations = NULL;
742 bo_gem->aub_annotation_count = 0;
744 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
746 DBG("bo_create: buf %d (%s) %ldb\n",
747 bo_gem->gem_handle, bo_gem->name, size);
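
/* Usage sketch (not part of the library): allocating through the public
 * entry points below ends up in drm_intel_gem_bo_alloc_internal() and,
 * when reuse is enabled, may be satisfied from the cache buckets above.
 */
static drm_intel_bo *
example_alloc(drm_intel_bufmgr *bufmgr)
{
	/* One page, page-aligned; the name is only used for debugging. */
	return drm_intel_bo_alloc(bufmgr, "example bo", 4096, 4096);
}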
752 static drm_intel_bo *
753 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
756 unsigned int alignment)
758 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
760 I915_TILING_NONE, 0);
763 static drm_intel_bo *
764 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
767 unsigned int alignment)
769 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
770 I915_TILING_NONE, 0);
773 static drm_intel_bo *
774 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
775 int x, int y, int cpp, uint32_t *tiling_mode,
776 unsigned long *pitch, unsigned long flags)
778 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
779 unsigned long size, stride;
783 unsigned long aligned_y, height_alignment;
785 tiling = *tiling_mode;
787 /* If we're tiled, our allocations are in 8 or 32-row blocks,
788 * so failure to align our height means that we won't allocate
791 * If we're untiled, we still have to align to 2 rows high
792 * because the data port accesses 2x2 blocks even if the
793 * bottom row isn't to be rendered, so failure to align means
794 * we could walk off the end of the GTT and fault. This is
795 * documented on 965, and may be the case on older chipsets
796 * too so we try to be careful.
799 height_alignment = 2;
801 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
802 height_alignment = 16;
803 else if (tiling == I915_TILING_X
804 || (IS_915(bufmgr_gem->pci_device)
805 && tiling == I915_TILING_Y))
806 height_alignment = 8;
807 else if (tiling == I915_TILING_Y)
808 height_alignment = 32;
809 aligned_y = ALIGN(y, height_alignment);
812 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
813 size = stride * aligned_y;
814 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
815 } while (*tiling_mode != tiling);
818 if (tiling == I915_TILING_NONE)
821 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
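
/* Usage sketch (not part of the library): callers must treat both
 * *tiling_mode and *pitch as in/out parameters, since the function may
 * demote the request to I915_TILING_NONE or pad the pitch for the
 * chipset.
 */
static drm_intel_bo *
example_alloc_x_tiled(drm_intel_bufmgr *bufmgr, unsigned long *pitch)
{
	uint32_t tiling = I915_TILING_X;

	/* 512x512 at 4 bytes per pixel; check `tiling` afterwards. */
	return drm_intel_bo_alloc_tiled(bufmgr, "example tiled", 512, 512,
					4, &tiling, pitch, 0);
}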
826 * Returns a drm_intel_bo wrapping the given buffer object handle.
828 * This can be used when one application needs to pass a buffer object
832 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
836 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
837 drm_intel_bo_gem *bo_gem;
839 struct drm_gem_open open_arg;
840 struct drm_i915_gem_get_tiling get_tiling;
	/* At the moment most applications only have a few named bos.
844 * For instance, in a DRI client only the render buffers passed
845 * between X and the client are named. And since X returns the
846 * alternating names for the front/back buffer a linear search
847 * provides a sufficiently fast match.
849 for (list = bufmgr_gem->named.next;
850 list != &bufmgr_gem->named;
852 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
853 if (bo_gem->global_name == handle) {
854 drm_intel_gem_bo_reference(&bo_gem->bo);
859 bo_gem = calloc(1, sizeof(*bo_gem));
864 open_arg.name = handle;
865 ret = drmIoctl(bufmgr_gem->fd,
869 DBG("Couldn't reference %s handle 0x%08x: %s\n",
870 name, handle, strerror(errno));
874 bo_gem->bo.size = open_arg.size;
875 bo_gem->bo.offset = 0;
876 bo_gem->bo.virtual = NULL;
877 bo_gem->bo.bufmgr = bufmgr;
879 atomic_set(&bo_gem->refcount, 1);
880 bo_gem->validate_index = -1;
881 bo_gem->gem_handle = open_arg.handle;
882 bo_gem->bo.handle = open_arg.handle;
883 bo_gem->global_name = handle;
884 bo_gem->reusable = false;
886 VG_CLEAR(get_tiling);
887 get_tiling.handle = bo_gem->gem_handle;
888 ret = drmIoctl(bufmgr_gem->fd,
889 DRM_IOCTL_I915_GEM_GET_TILING,
892 drm_intel_gem_bo_unreference(&bo_gem->bo);
895 bo_gem->tiling_mode = get_tiling.tiling_mode;
896 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
897 /* XXX stride is unknown */
898 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
900 DRMINITLISTHEAD(&bo_gem->vma_list);
901 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
902 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
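
/* Usage sketch (not part of the library): importing a buffer that
 * another process published with drm_intel_bo_flink(); repeated imports
 * of the same name return the same refcounted bo via the named list
 * above.
 */
static drm_intel_bo *
example_import(drm_intel_bufmgr *bufmgr, uint32_t global_name)
{
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared bo",
						 global_name);
}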
908 drm_intel_gem_bo_free(drm_intel_bo *bo)
910 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
911 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
912 struct drm_gem_close close;
915 DRMLISTDEL(&bo_gem->vma_list);
916 if (bo_gem->mem_virtual) {
917 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
918 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
919 bufmgr_gem->vma_count--;
921 if (bo_gem->gtt_virtual) {
922 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
923 bufmgr_gem->vma_count--;
926 /* Close this object */
928 close.handle = bo_gem->gem_handle;
929 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
931 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
932 bo_gem->gem_handle, bo_gem->name, strerror(errno));
934 free(bo_gem->aub_annotations);
939 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
942 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
944 if (bo_gem->mem_virtual)
945 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
947 if (bo_gem->gtt_virtual)
948 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
952 /** Frees all cached buffers significantly older than @time. */
954 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
958 if (bufmgr_gem->time == time)
961 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
962 struct drm_intel_gem_bo_bucket *bucket =
963 &bufmgr_gem->cache_bucket[i];
965 while (!DRMLISTEMPTY(&bucket->head)) {
966 drm_intel_bo_gem *bo_gem;
968 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
969 bucket->head.next, head);
970 if (time - bo_gem->free_time <= 1)
973 DRMLISTDEL(&bo_gem->head);
975 drm_intel_gem_bo_free(&bo_gem->bo);
979 bufmgr_gem->time = time;
982 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
986 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
987 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
989 if (bufmgr_gem->vma_max < 0)
992 /* We may need to evict a few entries in order to create new mmaps */
993 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
997 while (bufmgr_gem->vma_count > limit) {
998 drm_intel_bo_gem *bo_gem;
1000 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1001 bufmgr_gem->vma_cache.next,
1003 assert(bo_gem->map_count == 0);
1004 DRMLISTDELINIT(&bo_gem->vma_list);
1006 if (bo_gem->mem_virtual) {
1007 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1008 bo_gem->mem_virtual = NULL;
1009 bufmgr_gem->vma_count--;
1011 if (bo_gem->gtt_virtual) {
1012 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1013 bo_gem->gtt_virtual = NULL;
1014 bufmgr_gem->vma_count--;
1019 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1020 drm_intel_bo_gem *bo_gem)
1022 bufmgr_gem->vma_open--;
1023 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1024 if (bo_gem->mem_virtual)
1025 bufmgr_gem->vma_count++;
1026 if (bo_gem->gtt_virtual)
1027 bufmgr_gem->vma_count++;
1028 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1031 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1032 drm_intel_bo_gem *bo_gem)
1034 bufmgr_gem->vma_open++;
1035 DRMLISTDEL(&bo_gem->vma_list);
1036 if (bo_gem->mem_virtual)
1037 bufmgr_gem->vma_count--;
1038 if (bo_gem->gtt_virtual)
1039 bufmgr_gem->vma_count--;
1040 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1044 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1046 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1047 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1048 struct drm_intel_gem_bo_bucket *bucket;
1051 /* Unreference all the target buffers */
1052 for (i = 0; i < bo_gem->reloc_count; i++) {
1053 if (bo_gem->reloc_target_info[i].bo != bo) {
1054 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1055 reloc_target_info[i].bo,
1059 bo_gem->reloc_count = 0;
1060 bo_gem->used_as_reloc_target = false;
1062 DBG("bo_unreference final: %d (%s)\n",
1063 bo_gem->gem_handle, bo_gem->name);
1065 /* release memory associated with this object */
1066 if (bo_gem->reloc_target_info) {
1067 free(bo_gem->reloc_target_info);
1068 bo_gem->reloc_target_info = NULL;
1070 if (bo_gem->relocs) {
1071 free(bo_gem->relocs);
1072 bo_gem->relocs = NULL;
1075 /* Clear any left-over mappings */
1076 if (bo_gem->map_count) {
1077 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1078 bo_gem->map_count = 0;
1079 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1080 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1083 DRMLISTDEL(&bo_gem->name_list);
1085 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1086 /* Put the buffer into our internal cache for reuse if we can. */
1087 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1088 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1089 I915_MADV_DONTNEED)) {
1090 bo_gem->free_time = time;
1092 bo_gem->name = NULL;
1093 bo_gem->validate_index = -1;
1095 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1097 drm_intel_gem_bo_free(bo);
1101 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1104 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1106 assert(atomic_read(&bo_gem->refcount) > 0);
1107 if (atomic_dec_and_test(&bo_gem->refcount))
1108 drm_intel_gem_bo_unreference_final(bo, time);
1111 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1113 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1115 assert(atomic_read(&bo_gem->refcount) > 0);
1116 if (atomic_dec_and_test(&bo_gem->refcount)) {
1117 drm_intel_bufmgr_gem *bufmgr_gem =
1118 (drm_intel_bufmgr_gem *) bo->bufmgr;
1119 struct timespec time;
1121 clock_gettime(CLOCK_MONOTONIC, &time);
1123 pthread_mutex_lock(&bufmgr_gem->lock);
1124 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1125 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1126 pthread_mutex_unlock(&bufmgr_gem->lock);
1130 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1132 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1133 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1134 struct drm_i915_gem_set_domain set_domain;
1137 pthread_mutex_lock(&bufmgr_gem->lock);
1139 if (bo_gem->map_count++ == 0)
1140 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1142 if (!bo_gem->mem_virtual) {
1143 struct drm_i915_gem_mmap mmap_arg;
1145 DBG("bo_map: %d (%s), map_count=%d\n",
1146 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1149 mmap_arg.handle = bo_gem->gem_handle;
1150 mmap_arg.offset = 0;
1151 mmap_arg.size = bo->size;
1152 ret = drmIoctl(bufmgr_gem->fd,
1153 DRM_IOCTL_I915_GEM_MMAP,
1157 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1158 __FILE__, __LINE__, bo_gem->gem_handle,
1159 bo_gem->name, strerror(errno));
1160 if (--bo_gem->map_count == 0)
1161 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1162 pthread_mutex_unlock(&bufmgr_gem->lock);
1165 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1166 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1168 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1169 bo_gem->mem_virtual);
1170 bo->virtual = bo_gem->mem_virtual;
1172 VG_CLEAR(set_domain);
1173 set_domain.handle = bo_gem->gem_handle;
1174 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1176 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1178 set_domain.write_domain = 0;
1179 ret = drmIoctl(bufmgr_gem->fd,
1180 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1183 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1184 __FILE__, __LINE__, bo_gem->gem_handle,
1189 bo_gem->mapped_cpu_write = true;
1191 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1192 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1193 pthread_mutex_unlock(&bufmgr_gem->lock);
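
/* Usage sketch (not part of the library): a typical CPU upload through
 * drm_intel_bo_map(); the write_enable flag is what later triggers the
 * SW_FINISH path in drm_intel_gem_bo_unmap().
 */
static int
example_cpu_upload(drm_intel_bo *bo, const void *data, size_t len)
{
	int ret;

	ret = drm_intel_bo_map(bo, 1 /* write_enable */);
	if (ret != 0)
		return ret;
	memcpy(bo->virtual, data, len);
	return drm_intel_bo_unmap(bo);
}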
1199 map_gtt(drm_intel_bo *bo)
1201 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1202 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1205 if (bo_gem->map_count++ == 0)
1206 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1208 /* Get a mapping of the buffer if we haven't before. */
1209 if (bo_gem->gtt_virtual == NULL) {
1210 struct drm_i915_gem_mmap_gtt mmap_arg;
1212 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1213 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1216 mmap_arg.handle = bo_gem->gem_handle;
1218 /* Get the fake offset back... */
1219 ret = drmIoctl(bufmgr_gem->fd,
1220 DRM_IOCTL_I915_GEM_MMAP_GTT,
1224 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1226 bo_gem->gem_handle, bo_gem->name,
1228 if (--bo_gem->map_count == 0)
1229 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1234 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1235 MAP_SHARED, bufmgr_gem->fd,
1237 if (bo_gem->gtt_virtual == MAP_FAILED) {
1238 bo_gem->gtt_virtual = NULL;
1240 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1242 bo_gem->gem_handle, bo_gem->name,
1244 if (--bo_gem->map_count == 0)
1245 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1250 bo->virtual = bo_gem->gtt_virtual;
1252 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1253 bo_gem->gtt_virtual);
1258 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1260 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1261 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1262 struct drm_i915_gem_set_domain set_domain;
1265 pthread_mutex_lock(&bufmgr_gem->lock);
1269 pthread_mutex_unlock(&bufmgr_gem->lock);
1273 /* Now move it to the GTT domain so that the GPU and CPU
1274 * caches are flushed and the GPU isn't actively using the
1277 * The pagefault handler does this domain change for us when
1278 * it has unbound the BO from the GTT, but it's up to us to
1279 * tell it when we're about to use things if we had done
1280 * rendering and it still happens to be bound to the GTT.
1282 VG_CLEAR(set_domain);
1283 set_domain.handle = bo_gem->gem_handle;
1284 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1285 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1286 ret = drmIoctl(bufmgr_gem->fd,
1287 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1290 DBG("%s:%d: Error setting domain %d: %s\n",
1291 __FILE__, __LINE__, bo_gem->gem_handle,
1295 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1296 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1297 pthread_mutex_unlock(&bufmgr_gem->lock);
1303 * Performs a mapping of the buffer object like the normal GTT
1304 * mapping, but avoids waiting for the GPU to be done reading from or
1305 * rendering to the buffer.
1307 * This is used in the implementation of GL_ARB_map_buffer_range: The
1308 * user asks to create a buffer, then does a mapping, fills some
1309 * space, runs a drawing command, then asks to map it again without
1310 * synchronizing because it guarantees that it won't write over the
1311 * data that the GPU is busy using (or, more specifically, that if it
1312 * does write over the data, it acknowledges that rendering is
1316 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1318 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1321 /* If the CPU cache isn't coherent with the GTT, then use a
1322 * regular synchronized mapping. The problem is that we don't
1323 * track where the buffer was last used on the CPU side in
1324 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1325 * we would potentially corrupt the buffer even when the user
1326 * does reasonable things.
1328 if (!bufmgr_gem->has_llc)
1329 return drm_intel_gem_bo_map_gtt(bo);
1331 pthread_mutex_lock(&bufmgr_gem->lock);
1333 pthread_mutex_unlock(&bufmgr_gem->lock);
1338 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1340 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1341 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1347 pthread_mutex_lock(&bufmgr_gem->lock);
1349 if (bo_gem->map_count <= 0) {
1350 DBG("attempted to unmap an unmapped bo\n");
1351 pthread_mutex_unlock(&bufmgr_gem->lock);
1352 /* Preserve the old behaviour of just treating this as a
1353 * no-op rather than reporting the error.
1358 if (bo_gem->mapped_cpu_write) {
1359 struct drm_i915_gem_sw_finish sw_finish;
1361 /* Cause a flush to happen if the buffer's pinned for
1362 * scanout, so the results show up in a timely manner.
1363 * Unlike GTT set domains, this only does work if the
1364 * buffer should be scanout-related.
1366 VG_CLEAR(sw_finish);
1367 sw_finish.handle = bo_gem->gem_handle;
1368 ret = drmIoctl(bufmgr_gem->fd,
1369 DRM_IOCTL_I915_GEM_SW_FINISH,
1371 ret = ret == -1 ? -errno : 0;
1373 bo_gem->mapped_cpu_write = false;
	/* We need to unmap after every invocation, as we cannot track
	 * an open vma for every bo; that would exhaust the system
	 * limits and cause later failures.
1380 if (--bo_gem->map_count == 0) {
1381 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1382 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1385 pthread_mutex_unlock(&bufmgr_gem->lock);
1390 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1392 return drm_intel_gem_bo_unmap(bo);
1396 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1397 unsigned long size, const void *data)
1399 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1400 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1401 struct drm_i915_gem_pwrite pwrite;
1405 pwrite.handle = bo_gem->gem_handle;
1406 pwrite.offset = offset;
1408 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1409 ret = drmIoctl(bufmgr_gem->fd,
1410 DRM_IOCTL_I915_GEM_PWRITE,
1414 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1415 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1416 (int)size, strerror(errno));
1423 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1425 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1426 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1429 VG_CLEAR(get_pipe_from_crtc_id);
1430 get_pipe_from_crtc_id.crtc_id = crtc_id;
1431 ret = drmIoctl(bufmgr_gem->fd,
1432 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1433 &get_pipe_from_crtc_id);
1435 /* We return -1 here to signal that we don't
1436 * know which pipe is associated with this crtc.
1437 * This lets the caller know that this information
1438 * isn't available; using the wrong pipe for
1439 * vblank waiting can cause the chipset to lock up
1444 return get_pipe_from_crtc_id.pipe;
1448 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1449 unsigned long size, void *data)
1451 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1452 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1453 struct drm_i915_gem_pread pread;
1457 pread.handle = bo_gem->gem_handle;
1458 pread.offset = offset;
1460 pread.data_ptr = (uint64_t) (uintptr_t) data;
1461 ret = drmIoctl(bufmgr_gem->fd,
1462 DRM_IOCTL_I915_GEM_PREAD,
1466 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1467 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1468 (int)size, strerror(errno));
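
/* Usage sketch (not part of the library): subdata/get_subdata are the
 * copy-based alternative to mapping, going through the kernel's
 * pwrite/pread paths above.
 */
static int
example_copy_roundtrip(drm_intel_bo *bo, uint32_t *scratch)
{
	int ret;

	ret = drm_intel_bo_subdata(bo, 0, sizeof(*scratch), scratch);
	if (ret != 0)
		return ret;
	return drm_intel_bo_get_subdata(bo, 0, sizeof(*scratch), scratch);
}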
1474 /** Waits for all GPU rendering with the object to have completed. */
1476 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1478 drm_intel_gem_bo_start_gtt_access(bo, 1);
1482 * Sets the object to the GTT read and possibly write domain, used by the X
1483 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1485 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1486 * can do tiled pixmaps this way.
1489 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1491 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1492 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1493 struct drm_i915_gem_set_domain set_domain;
1496 VG_CLEAR(set_domain);
1497 set_domain.handle = bo_gem->gem_handle;
1498 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1499 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1500 ret = drmIoctl(bufmgr_gem->fd,
1501 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1504 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1505 __FILE__, __LINE__, bo_gem->gem_handle,
1506 set_domain.read_domains, set_domain.write_domain,
1512 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1514 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1517 free(bufmgr_gem->exec2_objects);
1518 free(bufmgr_gem->exec_objects);
1519 free(bufmgr_gem->exec_bos);
1521 pthread_mutex_destroy(&bufmgr_gem->lock);
1523 /* Free any cached buffer objects we were going to reuse */
1524 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1525 struct drm_intel_gem_bo_bucket *bucket =
1526 &bufmgr_gem->cache_bucket[i];
1527 drm_intel_bo_gem *bo_gem;
1529 while (!DRMLISTEMPTY(&bucket->head)) {
1530 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1531 bucket->head.next, head);
1532 DRMLISTDEL(&bo_gem->head);
1534 drm_intel_gem_bo_free(&bo_gem->bo);
1542 * Adds the target buffer to the validation list and adds the relocation
1543 * to the reloc_buffer's relocation list.
1545 * The relocation entry at the given offset must already contain the
1546 * precomputed relocation value, because the kernel will optimize out
1547 * the relocation entry write when the buffer hasn't moved from the
1548 * last known offset in target_bo.
1551 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1552 drm_intel_bo *target_bo, uint32_t target_offset,
1553 uint32_t read_domains, uint32_t write_domain,
1556 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1557 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1558 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1559 bool fenced_command;
1561 if (bo_gem->has_error)
1564 if (target_bo_gem->has_error) {
1565 bo_gem->has_error = true;
1569 /* We never use HW fences for rendering on 965+ */
1570 if (bufmgr_gem->gen >= 4)
1573 fenced_command = need_fence;
1574 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1577 /* Create a new relocation list if needed */
1578 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1581 /* Check overflow */
1582 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1585 assert(offset <= bo->size - 4);
1586 assert((write_domain & (write_domain - 1)) == 0);
1588 /* Make sure that we're not adding a reloc to something whose size has
1589 * already been accounted for.
1591 assert(!bo_gem->used_as_reloc_target);
1592 if (target_bo_gem != bo_gem) {
1593 target_bo_gem->used_as_reloc_target = true;
1594 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1596 /* An object needing a fence is a tiled buffer, so it won't have
1597 * relocs to other buffers.
1600 target_bo_gem->reloc_tree_fences = 1;
1601 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1603 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1604 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1605 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1606 target_bo_gem->gem_handle;
1607 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1608 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1609 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1611 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1612 if (target_bo != bo)
1613 drm_intel_gem_bo_reference(target_bo);
1615 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1616 DRM_INTEL_RELOC_FENCE;
1618 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1620 bo_gem->reloc_count++;
1626 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1627 drm_intel_bo *target_bo, uint32_t target_offset,
1628 uint32_t read_domains, uint32_t write_domain)
1630 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1632 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1633 read_domains, write_domain,
1634 !bufmgr_gem->fenced_relocs);
1638 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1639 drm_intel_bo *target_bo,
1640 uint32_t target_offset,
1641 uint32_t read_domains, uint32_t write_domain)
1643 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1644 read_domains, write_domain, true);
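
/* Usage sketch (not part of the library): recording that the dword at
 * batch_offset points into `target`. Per the comment above
 * do_bo_emit_reloc(), the caller must also have written the presumed
 * address (target->offset + target_offset) into the batch itself.
 */
static int
example_emit_pointer(drm_intel_bo *batch, uint32_t batch_offset,
		     drm_intel_bo *target)
{
	return drm_intel_bo_emit_reloc(batch, batch_offset, target, 0,
				       I915_GEM_DOMAIN_RENDER,
				       I915_GEM_DOMAIN_RENDER);
}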
1648 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1650 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1652 return bo_gem->reloc_count;
1656 * Removes existing relocation entries in the BO after "start".
1658 * This allows a user to avoid a two-step process for state setup with
1659 * counting up all the buffer objects and doing a
1660 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1661 * relocations for the state setup. Instead, save the state of the
1662 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1663 * state, and then check if it still fits in the aperture.
1665 * Any further drm_intel_bufmgr_check_aperture_space() queries
1666 * involving this buffer in the tree are undefined after this call.
1669 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1671 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1673 struct timespec time;
1675 clock_gettime(CLOCK_MONOTONIC, &time);
1677 assert(bo_gem->reloc_count >= start);
1678 /* Unreference the cleared target buffers */
1679 for (i = start; i < bo_gem->reloc_count; i++) {
1680 if (bo_gem->reloc_target_info[i].bo != bo) {
1681 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1682 reloc_target_info[i].bo,
1686 bo_gem->reloc_count = start;
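
/* Usage sketch (not part of the library) of the single-pass workflow
 * described above: snapshot the reloc count, emit state, then roll back
 * if the aperture check fails.
 */
static bool
example_state_fits(drm_intel_bo *batch, drm_intel_bo **state_bos,
		   int state_count)
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch);

	/* ... emit state and relocations referencing state_bos here ... */
	if (drm_intel_bufmgr_check_aperture_space(state_bos,
						  state_count) != 0) {
		drm_intel_gem_bo_clear_relocs(batch, saved);
		return false;
	}
	return true;
}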
1690 * Walk the tree of relocations rooted at BO and accumulate the list of
1691 * validations to be performed and update the relocation buffers with
1692 * index values into the validation list.
1695 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1697 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1700 if (bo_gem->relocs == NULL)
1703 for (i = 0; i < bo_gem->reloc_count; i++) {
1704 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1706 if (target_bo == bo)
1709 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1711 /* Continue walking the tree depth-first. */
1712 drm_intel_gem_bo_process_reloc(target_bo);
1714 /* Add the target to the validate list */
1715 drm_intel_add_validate_buffer(target_bo);
1720 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1722 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1725 if (bo_gem->relocs == NULL)
1728 for (i = 0; i < bo_gem->reloc_count; i++) {
1729 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1732 if (target_bo == bo)
1735 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1737 /* Continue walking the tree depth-first. */
1738 drm_intel_gem_bo_process_reloc2(target_bo);
1740 need_fence = (bo_gem->reloc_target_info[i].flags &
1741 DRM_INTEL_RELOC_FENCE);
1743 /* Add the target to the validate list */
1744 drm_intel_add_validate_buffer2(target_bo, need_fence);
1750 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1754 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1755 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1756 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1758 /* Update the buffer offset */
1759 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1760 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1761 bo_gem->gem_handle, bo_gem->name, bo->offset,
1762 (unsigned long long)bufmgr_gem->exec_objects[i].
1764 bo->offset = bufmgr_gem->exec_objects[i].offset;
1770 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1774 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1775 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1776 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1778 /* Update the buffer offset */
1779 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1780 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1781 bo_gem->gem_handle, bo_gem->name, bo->offset,
1782 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1783 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1789 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1791 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1795 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1797 fwrite(data, 1, size, bufmgr_gem->aub_file);
1801 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1803 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1804 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1808 data = malloc(bo->size);
1809 drm_intel_bo_get_subdata(bo, offset, size, data);
1811 /* Easy mode: write out bo with no relocations */
1812 if (!bo_gem->reloc_count) {
1813 aub_out_data(bufmgr_gem, data, size);
1818 /* Otherwise, handle the relocations while writing. */
1819 for (i = 0; i < size / 4; i++) {
1821 for (r = 0; r < bo_gem->reloc_count; r++) {
1822 struct drm_i915_gem_relocation_entry *reloc;
1823 drm_intel_reloc_target *info;
1825 reloc = &bo_gem->relocs[r];
1826 info = &bo_gem->reloc_target_info[r];
1828 if (reloc->offset == offset + i * 4) {
1829 drm_intel_bo_gem *target_gem;
1832 target_gem = (drm_intel_bo_gem *)info->bo;
1835 val += target_gem->aub_offset;
1837 aub_out(bufmgr_gem, val);
1842 if (r == bo_gem->reloc_count) {
1843 /* no relocation, just the data */
1844 aub_out(bufmgr_gem, data[i]);
1852 aub_bo_get_address(drm_intel_bo *bo)
1854 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1855 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1857 /* Give the object a graphics address in the AUB file. We
1858 * don't just use the GEM object address because we do AUB
1859 * dumping before execution -- we want to successfully log
	 * when the hardware might hang, and we might even want to do AUB
	 * capture for a driver trying to execute on a different
1862 * generation of hardware by disabling the actual kernel exec
1865 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1866 bufmgr_gem->aub_offset += bo->size;
1867 /* XXX: Handle aperture overflow. */
1868 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1872 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1873 uint32_t offset, uint32_t size)
1875 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1876 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1879 CMD_AUB_TRACE_HEADER_BLOCK |
1882 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1883 aub_out(bufmgr_gem, subtype);
1884 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1885 aub_out(bufmgr_gem, size);
1886 aub_write_bo_data(bo, offset, size);
 * Break up large objects into multiple writes. Otherwise a 128 kB VBO
 * would overflow the 16-bit size field in the packet header and
1892 * everything goes badly after that.
1895 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1896 uint32_t offset, uint32_t size)
1898 uint32_t block_size;
1899 uint32_t sub_offset;
1901 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
1902 block_size = size - sub_offset;
1904 if (block_size > 8 * 4096)
1905 block_size = 8 * 4096;
1907 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
1913 aub_write_bo(drm_intel_bo *bo)
1915 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1916 uint32_t offset = 0;
1919 aub_bo_get_address(bo);
1921 /* Write out each annotated section separately. */
1922 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
1923 drm_intel_aub_annotation *annotation =
1924 &bo_gem->aub_annotations[i];
1925 uint32_t ending_offset = annotation->ending_offset;
1926 if (ending_offset > bo->size)
1927 ending_offset = bo->size;
1928 if (ending_offset > offset) {
1929 aub_write_large_trace_block(bo, annotation->type,
1930 annotation->subtype,
1932 ending_offset - offset);
1933 offset = ending_offset;
1937 /* Write out any remaining unannotated data */
1938 if (offset < bo->size) {
1939 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1940 offset, bo->size - offset);
 * Make a ring buffer on the fly and dump it
1948 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
1949 uint32_t batch_buffer, int ring_flag)
1951 uint32_t ringbuffer[4096];
1952 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
1955 if (ring_flag == I915_EXEC_BSD)
1956 ring = AUB_TRACE_TYPE_RING_PRB1;
1958 /* Make a ring buffer to execute our batchbuffer. */
1959 memset(ringbuffer, 0, sizeof(ringbuffer));
1960 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
1961 ringbuffer[ring_count++] = batch_buffer;
1963 /* Write out the ring. This appears to trigger execution of
1964 * the ring in the simulator.
1967 CMD_AUB_TRACE_HEADER_BLOCK |
1970 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
1971 aub_out(bufmgr_gem, 0); /* general/surface subtype */
1972 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
1973 aub_out(bufmgr_gem, ring_count * 4);
1975 /* FIXME: Need some flush operations here? */
1976 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
1978 /* Update offset pointer */
1979 bufmgr_gem->aub_offset += 4096;
1983 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
1984 int x1, int y1, int width, int height,
1985 enum aub_dump_bmp_format format,
1986 int pitch, int offset)
1988 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1989 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1993 case AUB_DUMP_BMP_FORMAT_8BIT:
1996 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
1999 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2000 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2004 printf("Unknown AUB dump format %d\n", format);
2008 if (!bufmgr_gem->aub_file)
2011 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2012 aub_out(bufmgr_gem, (y1 << 16) | x1);
2017 aub_out(bufmgr_gem, (height << 16) | width);
2018 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2020 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2021 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2025 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2027 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2028 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2030 bool batch_buffer_needs_annotations;
2032 if (!bufmgr_gem->aub_file)
2035 /* If batch buffer is not annotated, annotate it the best we
2038 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2039 if (batch_buffer_needs_annotations) {
2040 drm_intel_aub_annotation annotations[2] = {
2041 { AUB_TRACE_TYPE_BATCH, 0, used },
2042 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2044 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2047 /* Write out all buffers to AUB memory */
2048 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2049 aub_write_bo(bufmgr_gem->exec_bos[i]);
2052 /* Remove any annotations we added */
2053 if (batch_buffer_needs_annotations)
2054 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2056 /* Dump ring buffer */
2057 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2059 fflush(bufmgr_gem->aub_file);
	 * One frame has been dumped, so reset the aub_offset for the next frame.
2064 * FIXME: Can we do this?
2066 bufmgr_gem->aub_offset = 0x10000;
2070 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2071 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2073 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2074 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2075 struct drm_i915_gem_execbuffer execbuf;
2078 if (bo_gem->has_error)
2081 pthread_mutex_lock(&bufmgr_gem->lock);
2082 /* Update indices and set up the validate list. */
2083 drm_intel_gem_bo_process_reloc(bo);
2085 /* Add the batch buffer to the validation list. There are no
2086 * relocations pointing to it.
2088 drm_intel_add_validate_buffer(bo);
2091 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2092 execbuf.buffer_count = bufmgr_gem->exec_count;
2093 execbuf.batch_start_offset = 0;
2094 execbuf.batch_len = used;
2095 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2096 execbuf.num_cliprects = num_cliprects;
2100 ret = drmIoctl(bufmgr_gem->fd,
2101 DRM_IOCTL_I915_GEM_EXECBUFFER,
2105 if (errno == ENOSPC) {
2106 DBG("Execbuffer fails to pin. "
2107 "Estimate: %u. Actual: %u. Available: %u\n",
2108 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2111 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2114 (unsigned int)bufmgr_gem->gtt_size);
2117 drm_intel_update_buffer_offsets(bufmgr_gem);
2119 if (bufmgr_gem->bufmgr.debug)
2120 drm_intel_gem_dump_validation_list(bufmgr_gem);
2122 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2123 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2124 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2126 /* Disconnect the buffer from the validate list */
2127 bo_gem->validate_index = -1;
2128 bufmgr_gem->exec_bos[i] = NULL;
2130 bufmgr_gem->exec_count = 0;
2131 pthread_mutex_unlock(&bufmgr_gem->lock);
2137 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2138 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2141 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2142 struct drm_i915_gem_execbuffer2 execbuf;
2146 switch (flags & 0x7) {
2150 if (!bufmgr_gem->has_blt)
2154 if (!bufmgr_gem->has_bsd)
2157 case I915_EXEC_RENDER:
2158 case I915_EXEC_DEFAULT:
2162 pthread_mutex_lock(&bufmgr_gem->lock);
2163 /* Update indices and set up the validate list. */
2164 drm_intel_gem_bo_process_reloc2(bo);
2166 /* Add the batch buffer to the validation list. There are no relocations
2169 drm_intel_add_validate_buffer2(bo, 0);
2172 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2173 execbuf.buffer_count = bufmgr_gem->exec_count;
2174 execbuf.batch_start_offset = 0;
2175 execbuf.batch_len = used;
2176 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2177 execbuf.num_cliprects = num_cliprects;
2180 execbuf.flags = flags;
2184 aub_exec(bo, flags, used);
2186 if (bufmgr_gem->no_exec)
2187 goto skip_execution;
2189 ret = drmIoctl(bufmgr_gem->fd,
2190 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2194 if (ret == -ENOSPC) {
2195 DBG("Execbuffer fails to pin. "
2196 "Estimate: %u. Actual: %u. Available: %u\n",
2197 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2198 bufmgr_gem->exec_count),
2199 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2200 bufmgr_gem->exec_count),
2201 (unsigned int) bufmgr_gem->gtt_size);
2204 drm_intel_update_buffer_offsets2(bufmgr_gem);
2207 if (bufmgr_gem->bufmgr.debug)
2208 drm_intel_gem_dump_validation_list(bufmgr_gem);
2210 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2211 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2212 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2214 /* Disconnect the buffer from the validate list */
2215 bo_gem->validate_index = -1;
2216 bufmgr_gem->exec_bos[i] = NULL;
2218 bufmgr_gem->exec_count = 0;
2219 pthread_mutex_unlock(&bufmgr_gem->lock);
2225 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2226 drm_clip_rect_t *cliprects, int num_cliprects,
2229 return drm_intel_gem_bo_mrb_exec2(bo, used,
2230 cliprects, num_cliprects, DR4,
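
/* Usage sketch (not part of the library): the public multi-ring entry
 * point, assuming a kernel with BLT support (see the has_blt check in
 * drm_intel_gem_bo_mrb_exec2() above).
 */
static int
example_exec_blt(drm_intel_bo *batch, int used)
{
	return drm_intel_bo_mrb_exec(batch, used, NULL, 0, 0,
				     I915_EXEC_BLT);
}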
2235 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2237 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2238 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2239 struct drm_i915_gem_pin pin;
2243 pin.handle = bo_gem->gem_handle;
2244 pin.alignment = alignment;
2246 ret = drmIoctl(bufmgr_gem->fd,
2247 DRM_IOCTL_I915_GEM_PIN,
2252 bo->offset = pin.offset;
2257 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2259 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2260 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2261 struct drm_i915_gem_unpin unpin;
2265 unpin.handle = bo_gem->gem_handle;
2267 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open-code
		 * drmIoctl here.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}
static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride.  By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}
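
/*
 * Illustrative usage sketch, not part of the original file: a client
 * requests a tiling mode through the public drm_intel_bo_set_tiling()
 * and must re-read *tiling_mode afterwards, since the kernel may refuse
 * or downgrade the request.  The 4096-byte stride is an assumption for
 * the example.
 */
static int
example_request_x_tiling(drm_intel_bo *bo)
{
	uint32_t tiling = I915_TILING_X;

	drm_intel_bo_set_tiling(bo, &tiling, 4096);

	/* Report whether the bo actually ended up X-tiled. */
	return tiling == I915_TILING_X;
}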
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		VG_CLEAR(flink);
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;

		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}
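
/*
 * Illustrative usage sketch, not part of the original file: flink names
 * are how two processes share a bo.  The exporter obtains a global name
 * with drm_intel_bo_flink(); the importer (usually another process, with
 * its own bufmgr on the same device) opens it with
 * drm_intel_bo_gem_create_from_name().  The helper and the "shared"
 * label are assumptions for the example.
 */
static drm_intel_bo *
example_export_and_reimport(drm_intel_bufmgr *bufmgr, drm_intel_bo *bo)
{
	uint32_t name;

	if (drm_intel_bo_flink(bo, &name) != 0)
		return NULL;

	/* In real use, `name` would be handed to the other process first. */
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}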
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
/**
 * Enable use of the fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, a fence register is
 * allocated for every reloc.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}
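
/*
 * Illustrative usage sketch, not part of the original file: a typical
 * client opts into both features right after creating the buffer
 * manager.  Note the ordering assumption: fenced relocs only take effect
 * when the execbuf2 path was selected at init time.
 */
static void
example_enable_optional_features(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
}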
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}
/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}
/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers.  This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}
/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
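
/*
 * Illustrative usage sketch, not part of the original file: the intended
 * calling pattern for the check above.  Before emitting rendering that
 * references `bos`, the client asks whether everything would still fit
 * in the aperture; if not, it submits the batch accumulated so far and
 * starts a fresh one.  The flush_batch callback is hypothetical.
 */
static void
example_emit_with_aperture_check(drm_intel_bo **bos, int count,
				 void (*flush_batch)(void))
{
	if (drm_intel_bufmgr_check_aperture_space(bos, count) != 0)
		flush_batch();	/* retry against an empty batch */

	/* ... now emit the commands that reference bos ... */
}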
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}
static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}
static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}
/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}
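
/*
 * Illustrative usage sketch, not part of the original file: a resource
 * cache might consult the public drm_intel_bo_references() before
 * releasing a buffer, to avoid freeing something the still-unsubmitted
 * batch points at.  Both parameter names are assumptions.
 */
static int
example_safe_to_release(drm_intel_bo *batch_bo, drm_intel_bo *texture_bo)
{
	return !drm_intel_bo_references(batch_bo, texture_bo);
}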
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}
static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power-of-two buckets were too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway.)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
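
/*
 * For reference, a worked example of the ladder the loop above builds:
 * 4KB, 8KB and 12KB first, then four buckets per power-of-two interval:
 *
 *   16KB, 20KB, 24KB, 28KB,
 *   32KB, 40KB, 48KB, 56KB,
 *   64KB, 80KB, 96KB, 112KB,
 *   ...
 *
 * up through the 64MB cache_max_size rung (whose three intermediate
 * sizes land above 64MB), for 55 buckets in total against the 14 * 4
 * entries reserved in cache_bucket[].
 */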
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			/* Parse as an integer; base 0 accepts both decimal
			 * and 0x-prefixed hex IDs (strtod here was a bug). */
			return strtol(devid_override, NULL, 0);
		}
	}

	VG_CLEAR(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}
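
/*
 * Illustrative usage sketch, not part of the original file: running a
 * client as, e.g.,
 *
 *   INTEL_DEVID_OVERRIDE=0x0166 ./client
 *
 * makes the bufmgr report that chipset ID (0x0166 is assumed here just
 * for the example) and sets no_exec, so batches are never actually
 * submitted; this is mainly useful together with AUB dumping when
 * targeting other hardware.  The checking helper below is hypothetical.
 */
static int
example_devid_override_active(drm_intel_bufmgr *bufmgr)
{
	return drm_intel_bufmgr_gem_get_devid(bufmgr) == 0x0166;
}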
int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	int entry = 0x200003;
	int i;
	int gtt_size = 0x10000;

	if (!enable) {
		if (bufmgr_gem->aub_file) {
			fclose(bufmgr_gem->aub_file);
			bufmgr_gem->aub_file = NULL;
		}
		return;
	}

	if (geteuid() != getuid())
		return;

	bufmgr_gem->aub_file = fopen("intel.aub", "w+");
	if (!bufmgr_gem->aub_file)
		return;

	/* Start allocating objects from just after the GTT. */
	bufmgr_gem->aub_offset = gtt_size;

	/* Start with a (required) version packet. */
	aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
	aub_out(bufmgr_gem,
		(4 << AUB_HEADER_MAJOR_SHIFT) |
		(0 << AUB_HEADER_MINOR_SHIFT));
	for (i = 0; i < 8; i++) {
		aub_out(bufmgr_gem, 0); /* app name */
	}
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* comment len */

	/* Set up the GTT. The max we can handle is 256M */
	aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
	aub_out(bufmgr_gem,
		AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
	aub_out(bufmgr_gem, 0); /* subtype */
	aub_out(bufmgr_gem, 0); /* offset */
	aub_out(bufmgr_gem, gtt_size); /* size */
	for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
		aub_out(bufmgr_gem, entry);
	}
}
/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so the caller
 * may reuse the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 */
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
					 drm_intel_aub_annotation *annotations,
					 unsigned count)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	unsigned size = sizeof(*annotations) * count;
	drm_intel_aub_annotation *new_annotations =
		count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;

	if (new_annotations == NULL) {
		free(bo_gem->aub_annotations);
		bo_gem->aub_annotations = NULL;
		bo_gem->aub_annotation_count = 0;
		return;
	}
	memcpy(new_annotations, annotations, size);
	bo_gem->aub_annotations = new_annotations;
	bo_gem->aub_annotation_count = count;
}
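
/*
 * Illustrative usage sketch, not part of the original file: labeling the
 * first 4KB of a bo as batch commands and the rest as untyped data so an
 * AUB decoder can tell the sections apart.  The split point and the
 * choice of AUB_TRACE_* values are assumptions for the example.
 */
static void
example_annotate_batch(drm_intel_bo *bo)
{
	drm_intel_aub_annotation annotations[] = {
		{ AUB_TRACE_TYPE_BATCH, 0, 4096 },
		{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size },
	};

	drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
}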
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}
	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else
		assert(0);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits.  To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}
	VG_CLEAR(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* The kernel does not support the HAS_LLC query; fall back to
		 * GPU generation detection and assume that we have LLC on
		 * GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = ret == 0;
	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers.  Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}
	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}
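
/*
 * Illustrative usage sketch, not part of the original file: the usual
 * bring-up sequence for this buffer manager.  The 4096-byte batch size
 * is an assumption for the example; real clients pass the size of the
 * batchbuffers they actually emit.
 */
static drm_intel_bufmgr *
example_open_bufmgr(void)
{
	drm_intel_bufmgr *bufmgr;
	int fd = drmOpen("i915", NULL);

	if (fd < 0)
		return NULL;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (bufmgr == NULL) {
		drmClose(fd);
		return NULL;
	}

	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	return bufmgr;
}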