1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions of the Software.
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
52 #include <sys/types.h>
57 #define ETIME ETIMEDOUT
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
77 #define memclear(s) memset(&s, 0, sizeof(s))
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
85 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
88 * upper_32_bits - return bits 32-63 of a number
89 * @n: the number we're accessing
91 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
92 * the "right shift count >= width of type" warning when that quantity is
95 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
98 * lower_32_bits - return bits 0-31 of a number
99 * @n: the number we're accessing
101 #define lower_32_bits(n) ((__u32)(n))
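/*
 * Illustrative sketch (not part of the original file): these helpers are
 * used throughout this code to print a 64-bit GTT offset as two 32-bit
 * halves without tripping shift-width warnings, e.g.:
 *
 *	uint64_t offset64 = bo->offset64;
 *	DBG("bo @ 0x%08x %08x\n",
 *	    upper_32_bits(offset64), lower_32_bits(offset64));
 */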
103 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
105 struct drm_intel_gem_bo_bucket {
110 typedef struct _drm_intel_bufmgr_gem {
111 drm_intel_bufmgr bufmgr;
119 pthread_mutex_t lock;
121 struct drm_i915_gem_exec_object *exec_objects;
122 struct drm_i915_gem_exec_object2 *exec2_objects;
123 drm_intel_bo **exec_bos;
127 /** Array of lists of cached gem objects of power-of-two sizes */
128 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
132 drmMMListHead managers;
134 drm_intel_bo_gem *name_table;
135 drm_intel_bo_gem *handle_table;
137 drmMMListHead vma_cache;
138 int vma_count, vma_open, vma_max;
141 int available_fences;
144 unsigned int has_bsd : 1;
145 unsigned int has_blt : 1;
146 unsigned int has_relaxed_fencing : 1;
147 unsigned int has_llc : 1;
148 unsigned int has_wait_timeout : 1;
149 unsigned int bo_reuse : 1;
150 unsigned int no_exec : 1;
151 unsigned int has_vebox : 1;
159 } drm_intel_bufmgr_gem;
161 #define DRM_INTEL_RELOC_FENCE (1<<0)
163 typedef struct _drm_intel_reloc_target_info {
166 } drm_intel_reloc_target;
168 struct _drm_intel_bo_gem {
176 * Kernel-assigned global name for this object
178 * The list contains both flink-named and prime-fd'd objects
180 unsigned int global_name;
182 UT_hash_handle handle_hh;
183 UT_hash_handle name_hh;
186 * Index of the buffer within the validation list while preparing a
187 * batchbuffer execution.
192 * Current tiling mode
194 uint32_t tiling_mode;
195 uint32_t swizzle_mode;
196 unsigned long stride;
200 /** Array passed to the DRM containing relocation information. */
201 struct drm_i915_gem_relocation_entry *relocs;
203 * Array of info structs corresponding to relocs[i].target_handle, etc.
205 drm_intel_reloc_target *reloc_target_info;
206 /** Number of entries in relocs */
208 /** Array of BOs that are referenced by this buffer and will be softpinned */
209 drm_intel_bo **softpin_target;
210 /** Number of softpinned BOs that are referenced by this buffer */
211 int softpin_target_count;
212 /** Allocated size of the softpin_target array */
213 int softpin_target_size;
215 /** Mapped address for the buffer, saved across map/unmap cycles */
217 /** GTT virtual address for the buffer, saved across map/unmap cycles */
219 /** WC CPU address for the buffer, saved across map/unmap cycles */
222 * Virtual address of the buffer allocated by the user, used for userptr objects.
227 drmMMListHead vma_list;
233 * Boolean of whether this BO and its children have been included in
234 * the current drm_intel_bufmgr_check_aperture_space() total.
236 bool included_in_check_aperture;
239 * Boolean of whether this buffer has been used as a relocation
240 * target and had its size accounted for, and thus can't have any
241 * further relocations added to it.
243 bool used_as_reloc_target;
246 * Boolean of whether we have encountered an error whilst building the relocation tree.
251 * Boolean of whether this buffer can be re-used
256 * Boolean of whether the GPU is definitely not accessing the buffer.
258 * This is only valid when reusable, since non-reusable
259 * buffers are those that have been shared with other
260 * processes, so we don't know their state.
265 * Boolean of whether this buffer was allocated with userptr
270 * Boolean of whether this buffer can be placed in the full 48-bit
271 * address range on gen8+.
273 * By default, buffers will be kept in a 32-bit range, unless this
274 * flag is explicitly set.
276 bool use_48b_address_range;
279 * Whether this buffer is softpinned at the offset specified by the user
284 * Size in bytes of this buffer and its relocation descendants.
286 * Used to avoid costly tree walking in
287 * drm_intel_bufmgr_check_aperture in the common case.
292 * Number of potential fence registers required by this buffer and its relocation targets.
295 int reloc_tree_fences;
297 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
298 bool mapped_cpu_write;
302 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
305 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
308 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
309 uint32_t * swizzle_mode);
312 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
313 uint32_t tiling_mode,
316 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
319 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
321 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
323 static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
325 return (drm_intel_bo_gem *)bo;
329 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
330 uint32_t *tiling_mode)
332 unsigned long min_size, max_size;
335 if (*tiling_mode == I915_TILING_NONE)
338 /* 965+ just need multiples of page size for tiling */
339 if (bufmgr_gem->gen >= 4)
340 return ROUND_UP_TO(size, 4096);
342 /* Older chips need powers of two, of at least 512k or 1M */
343 if (bufmgr_gem->gen == 3) {
344 min_size = 1024*1024;
345 max_size = 128*1024*1024;
348 max_size = 64*1024*1024;
351 if (size > max_size) {
352 *tiling_mode = I915_TILING_NONE;
356 /* Do we need to allocate every page for the fence? */
357 if (bufmgr_gem->has_relaxed_fencing)
358 return ROUND_UP_TO(size, 4096);
360 for (i = min_size; i < size; i <<= 1)
367 * Round a given pitch up to the minimum required for X tiling on a
368 * given chip. We use 512 as the minimum to allow for a later tiling change.
372 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
373 unsigned long pitch, uint32_t *tiling_mode)
375 unsigned long tile_width;
378 /* If untiled, then just align it so that we can do rendering
379 * to it with the 3D engine.
381 if (*tiling_mode == I915_TILING_NONE)
382 return ALIGN(pitch, 64);
384 if (*tiling_mode == I915_TILING_X
385 || (IS_915(bufmgr_gem->pci_device)
386 && *tiling_mode == I915_TILING_Y))
391 /* 965 is flexible */
392 if (bufmgr_gem->gen >= 4)
393 return ROUND_UP_TO(pitch, tile_width);
395 /* The older hardware has a maximum pitch of 8192 with tiled
396 * surfaces, so fall back to untiled if it's too large.
399 *tiling_mode = I915_TILING_NONE;
400 return ALIGN(pitch, 64);
403 /* Pre-965 needs power of two tile width */
404 for (i = tile_width; i < pitch; i <<= 1)
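/**
 * Returns the smallest cache bucket whose size is at least the requested
 * size, or NULL if the request is larger than any bucket (in which case
 * the allocation will not be cached).
 */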
410 static struct drm_intel_gem_bo_bucket *
411 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
416 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
417 struct drm_intel_gem_bo_bucket *bucket =
418 &bufmgr_gem->cache_bucket[i];
419 if (bucket->size >= size) {
428 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
432 for (i = 0; i < bufmgr_gem->exec_count; i++) {
433 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
434 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
436 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
437 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
438 bo_gem->is_softpin ? "*" : "",
443 for (j = 0; j < bo_gem->reloc_count; j++) {
444 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
445 drm_intel_bo_gem *target_gem =
446 (drm_intel_bo_gem *) target_bo;
448 DBG("%2d: %d %s(%s)@0x%08x %08x -> "
449 "%d (%s)@0x%08x %08x + 0x%08x\n",
452 bo_gem->is_softpin ? "*" : "",
454 upper_32_bits(bo_gem->relocs[j].offset),
455 lower_32_bits(bo_gem->relocs[j].offset),
456 target_gem->gem_handle,
458 upper_32_bits(target_bo->offset64),
459 lower_32_bits(target_bo->offset64),
460 bo_gem->relocs[j].delta);
463 for (j = 0; j < bo_gem->softpin_target_count; j++) {
464 drm_intel_bo *target_bo = bo_gem->softpin_target[j];
465 drm_intel_bo_gem *target_gem =
466 (drm_intel_bo_gem *) target_bo;
467 DBG("%2d: %d %s(%s) -> "
468 "%d *(%s)@0x%08x %08x\n",
471 bo_gem->is_softpin ? "*" : "",
473 target_gem->gem_handle,
475 upper_32_bits(target_bo->offset64),
476 lower_32_bits(target_bo->offset64));
482 drm_intel_gem_bo_reference(drm_intel_bo *bo)
484 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
486 atomic_inc(&bo_gem->refcount);
490 * Adds the given buffer to the list of buffers to be validated (moved into the
491 * appropriate memory type) with the next batch submission.
493 * If a buffer is validated multiple times in a batch submission, it ends up
494 * with the intersection of the memory type flags and the union of the access flags.
498 drm_intel_add_validate_buffer(drm_intel_bo *bo)
500 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
501 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
504 if (bo_gem->validate_index != -1)
507 /* Extend the array of validation entries as necessary. */
508 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
509 int new_size = bufmgr_gem->exec_size * 2;
514 bufmgr_gem->exec_objects =
515 realloc(bufmgr_gem->exec_objects,
516 sizeof(*bufmgr_gem->exec_objects) * new_size);
517 bufmgr_gem->exec_bos =
518 realloc(bufmgr_gem->exec_bos,
519 sizeof(*bufmgr_gem->exec_bos) * new_size);
520 bufmgr_gem->exec_size = new_size;
523 index = bufmgr_gem->exec_count;
524 bo_gem->validate_index = index;
525 /* Fill in array entry */
526 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
527 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
528 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
529 bufmgr_gem->exec_objects[index].alignment = bo->align;
530 bufmgr_gem->exec_objects[index].offset = 0;
531 bufmgr_gem->exec_bos[index] = bo;
532 bufmgr_gem->exec_count++;
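/**
 * Variant of drm_intel_add_validate_buffer() for the execbuffer2 path: in
 * addition to adding the buffer to the validation list, it records
 * per-object flags (fence requirement, 48-bit address support, softpin) in
 * the drm_i915_gem_exec_object2 entry. If the BO is already on the list,
 * only its flags are updated.
 */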
536 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
538 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
539 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
544 flags |= EXEC_OBJECT_NEEDS_FENCE;
545 if (bo_gem->use_48b_address_range)
546 flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
547 if (bo_gem->is_softpin)
548 flags |= EXEC_OBJECT_PINNED;
550 if (bo_gem->validate_index != -1) {
551 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
555 /* Extend the array of validation entries as necessary. */
556 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
557 int new_size = bufmgr_gem->exec_size * 2;
562 bufmgr_gem->exec2_objects =
563 realloc(bufmgr_gem->exec2_objects,
564 sizeof(*bufmgr_gem->exec2_objects) * new_size);
565 bufmgr_gem->exec_bos =
566 realloc(bufmgr_gem->exec_bos,
567 sizeof(*bufmgr_gem->exec_bos) * new_size);
568 bufmgr_gem->exec_size = new_size;
571 index = bufmgr_gem->exec_count;
572 bo_gem->validate_index = index;
573 /* Fill in array entry */
574 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
575 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
576 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
577 bufmgr_gem->exec2_objects[index].alignment = bo->align;
578 bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
580 bufmgr_gem->exec_bos[index] = bo;
581 bufmgr_gem->exec2_objects[index].flags = flags;
582 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
583 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
584 bufmgr_gem->exec_count++;
587 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
591 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
592 drm_intel_bo_gem *bo_gem,
593 unsigned int alignment)
597 assert(!bo_gem->used_as_reloc_target);
599 /* The older chipsets are far less flexible in terms of tiling,
600 * and require tiled buffers to be size-aligned in the aperture.
601 * This means that in the worst possible case we will need a hole
602 * twice as large as the object in order for it to fit into the
603 * aperture. Optimal packing is for wimps.
605 size = bo_gem->bo.size;
606 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
607 unsigned int min_size;
609 if (bufmgr_gem->has_relaxed_fencing) {
610 if (bufmgr_gem->gen == 3)
611 min_size = 1024*1024;
615 while (min_size < size)
620 /* Account for worst-case alignment. */
621 alignment = MAX2(alignment, min_size);
624 bo_gem->reloc_tree_size = size + alignment;
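/**
 * Allocates the relocation array and the matching reloc_target_info array
 * for this BO. The entry count is capped at the bufmgr-wide max_relocs and
 * at bo->size / 4, since each relocation patches at least one dword in the
 * buffer. On allocation failure the BO is flagged with has_error.
 */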
628 drm_intel_setup_reloc_list(drm_intel_bo *bo)
630 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
631 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
632 unsigned int max_relocs = bufmgr_gem->max_relocs;
634 if (bo->size / 4 < max_relocs)
635 max_relocs = bo->size / 4;
637 bo_gem->relocs = malloc(max_relocs *
638 sizeof(struct drm_i915_gem_relocation_entry));
639 bo_gem->reloc_target_info = malloc(max_relocs *
640 sizeof(drm_intel_reloc_target));
641 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
642 bo_gem->has_error = true;
644 free (bo_gem->relocs);
645 bo_gem->relocs = NULL;
647 free (bo_gem->reloc_target_info);
648 bo_gem->reloc_target_info = NULL;
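/**
 * Queries whether the GPU is still using this BO via
 * DRM_IOCTL_I915_GEM_BUSY. Once a reusable BO has been seen idle, the
 * cached idle flag short-circuits further ioctls.
 */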
657 drm_intel_gem_bo_busy(drm_intel_bo *bo)
659 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
660 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
661 struct drm_i915_gem_busy busy;
664 if (bo_gem->reusable && bo_gem->idle)
668 busy.handle = bo_gem->gem_handle;
670 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
672 bo_gem->idle = !busy.busy;
677 return (ret == 0 && busy.busy);
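/**
 * Tells the kernel via DRM_IOCTL_I915_GEM_MADVISE whether the backing
 * pages of the object may be reclaimed under memory pressure
 * (I915_MADV_DONTNEED) or are needed again (I915_MADV_WILLNEED). Returns
 * whether the backing storage is still retained, i.e. the object has not
 * been purged.
 */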
681 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
682 drm_intel_bo_gem *bo_gem, int state)
684 struct drm_i915_gem_madvise madv;
687 madv.handle = bo_gem->gem_handle;
690 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
692 return madv.retained;
696 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
698 return drm_intel_gem_bo_madvise_internal
699 ((drm_intel_bufmgr_gem *) bo->bufmgr,
700 (drm_intel_bo_gem *) bo,
704 /* drop the oldest entries that have been purged by the kernel */
706 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
707 struct drm_intel_gem_bo_bucket *bucket)
709 while (!DRMLISTEMPTY(&bucket->head)) {
710 drm_intel_bo_gem *bo_gem;
712 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
713 bucket->head.next, head);
714 if (drm_intel_gem_bo_madvise_internal
715 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
718 DRMLISTDEL(&bo_gem->head);
719 drm_intel_gem_bo_free(&bo_gem->bo);
723 static drm_intel_bo *
724 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
728 uint32_t tiling_mode,
729 unsigned long stride,
730 unsigned int alignment)
732 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
733 drm_intel_bo_gem *bo_gem;
734 unsigned int page_size = getpagesize();
736 struct drm_intel_gem_bo_bucket *bucket;
737 bool alloc_from_cache;
738 unsigned long bo_size;
739 bool for_render = false;
741 if (flags & BO_ALLOC_FOR_RENDER)
744 /* Round the allocated size up to a power of two number of pages. */
745 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
747 /* If we don't have caching at this size, don't actually round the allocation up. */
750 if (bucket == NULL) {
752 if (bo_size < page_size)
755 bo_size = bucket->size;
758 pthread_mutex_lock(&bufmgr_gem->lock);
759 /* Get a buffer out of the cache if available */
761 alloc_from_cache = false;
762 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
764 /* Allocate new render-target BOs from the tail (MRU)
765 * of the list, as it will likely be hot in the GPU
766 * cache and in the aperture for us.
768 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
769 bucket->head.prev, head);
770 DRMLISTDEL(&bo_gem->head);
771 alloc_from_cache = true;
772 bo_gem->bo.align = alignment;
774 assert(alignment == 0);
775 /* For non-render-target BOs (where we're probably
776 * going to map it first thing in order to fill it
777 * with data), check if the last BO in the cache is
778 * unbusy, and only reuse in that case. Otherwise,
779 * allocating a new buffer is probably faster than
780 * waiting for the GPU to finish.
782 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
783 bucket->head.next, head);
784 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
785 alloc_from_cache = true;
786 DRMLISTDEL(&bo_gem->head);
790 if (alloc_from_cache) {
791 if (!drm_intel_gem_bo_madvise_internal
792 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
793 drm_intel_gem_bo_free(&bo_gem->bo);
794 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
799 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
802 drm_intel_gem_bo_free(&bo_gem->bo);
808 if (!alloc_from_cache) {
809 struct drm_i915_gem_create create;
811 bo_gem = calloc(1, sizeof(*bo_gem));
815 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
816 list (vma_list), so better set the list head here */
817 DRMINITLISTHEAD(&bo_gem->vma_list);
819 bo_gem->bo.size = bo_size;
822 create.size = bo_size;
824 ret = drmIoctl(bufmgr_gem->fd,
825 DRM_IOCTL_I915_GEM_CREATE,
832 bo_gem->gem_handle = create.handle;
833 bo_gem->bo.handle = bo_gem->gem_handle;
834 bo_gem->bo.bufmgr = bufmgr;
835 bo_gem->bo.align = alignment;
837 bo_gem->tiling_mode = I915_TILING_NONE;
838 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
841 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
846 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
847 gem_handle, sizeof(bo_gem->gem_handle),
852 atomic_set(&bo_gem->refcount, 1);
853 bo_gem->validate_index = -1;
854 bo_gem->reloc_tree_fences = 0;
855 bo_gem->used_as_reloc_target = false;
856 bo_gem->has_error = false;
857 bo_gem->reusable = true;
858 bo_gem->use_48b_address_range = false;
860 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
861 pthread_mutex_unlock(&bufmgr_gem->lock);
863 DBG("bo_create: buf %d (%s) %ldb\n",
864 bo_gem->gem_handle, bo_gem->name, size);
869 drm_intel_gem_bo_free(&bo_gem->bo);
871 pthread_mutex_unlock(&bufmgr_gem->lock);
875 static drm_intel_bo *
876 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
879 unsigned int alignment)
881 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
887 static drm_intel_bo *
888 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
891 unsigned int alignment)
893 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
894 I915_TILING_NONE, 0, 0);
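/*
 * Example usage (illustrative sketch, not taken from this file):
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
 *	if (bo != NULL) {
 *		... use the buffer ...
 *		drm_intel_bo_unreference(bo);
 *	}
 */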
897 static drm_intel_bo *
898 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
899 int x, int y, int cpp, uint32_t *tiling_mode,
900 unsigned long *pitch, unsigned long flags)
902 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
903 unsigned long size, stride;
907 unsigned long aligned_y, height_alignment;
909 tiling = *tiling_mode;
911 /* If we're tiled, our allocations are in 8 or 32-row blocks,
912 * so failure to align our height means that we won't allocate enough pages.
915 * If we're untiled, we still have to align to 2 rows high
916 * because the data port accesses 2x2 blocks even if the
917 * bottom row isn't to be rendered, so failure to align means
918 * we could walk off the end of the GTT and fault. This is
919 * documented on 965, and may be the case on older chipsets
920 * too so we try to be careful.
923 height_alignment = 2;
925 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
926 height_alignment = 16;
927 else if (tiling == I915_TILING_X
928 || (IS_915(bufmgr_gem->pci_device)
929 && tiling == I915_TILING_Y))
930 height_alignment = 8;
931 else if (tiling == I915_TILING_Y)
932 height_alignment = 32;
933 aligned_y = ALIGN(y, height_alignment);
936 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
937 size = stride * aligned_y;
938 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
939 } while (*tiling_mode != tiling);
942 if (tiling == I915_TILING_NONE)
945 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
949 static drm_intel_bo *
950 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
953 uint32_t tiling_mode,
958 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
959 drm_intel_bo_gem *bo_gem;
961 struct drm_i915_gem_userptr userptr;
963 /* Tiling with userptr surfaces is not supported
964 * on all hardware so refuse it for the time being.
966 if (tiling_mode != I915_TILING_NONE)
969 bo_gem = calloc(1, sizeof(*bo_gem));
973 atomic_set(&bo_gem->refcount, 1);
974 DRMINITLISTHEAD(&bo_gem->vma_list);
976 bo_gem->bo.size = size;
979 userptr.user_ptr = (__u64)((unsigned long)addr);
980 userptr.user_size = size;
981 userptr.flags = flags;
983 ret = drmIoctl(bufmgr_gem->fd,
984 DRM_IOCTL_I915_GEM_USERPTR,
987 DBG("bo_create_userptr: "
988 "ioctl failed with user ptr %p size 0x%lx, "
989 "user flags 0x%lx\n", addr, size, flags);
994 pthread_mutex_lock(&bufmgr_gem->lock);
996 bo_gem->gem_handle = userptr.handle;
997 bo_gem->bo.handle = bo_gem->gem_handle;
998 bo_gem->bo.bufmgr = bufmgr;
999 bo_gem->is_userptr = true;
1000 bo_gem->bo.virtual = addr;
1001 /* Save the address provided by user */
1002 bo_gem->user_virtual = addr;
1003 bo_gem->tiling_mode = I915_TILING_NONE;
1004 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
1007 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1008 gem_handle, sizeof(bo_gem->gem_handle),
1011 bo_gem->name = name;
1012 bo_gem->validate_index = -1;
1013 bo_gem->reloc_tree_fences = 0;
1014 bo_gem->used_as_reloc_target = false;
1015 bo_gem->has_error = false;
1016 bo_gem->reusable = false;
1017 bo_gem->use_48b_address_range = false;
1019 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1020 pthread_mutex_unlock(&bufmgr_gem->lock);
1022 DBG("bo_create_userptr: "
1023 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
1024 addr, bo_gem->gem_handle, bo_gem->name,
1025 size, stride, tiling_mode);
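/**
 * Probes whether the running kernel supports DRM_IOCTL_I915_GEM_USERPTR by
 * creating a page-aligned test allocation and trying the ioctl, retrying
 * with I915_USERPTR_UNSYNCHRONIZED if the kernel reports ENODEV. The probe
 * object is kept alive (see userptr_active) so the expensive mmu_notifier
 * setup is not repeated later.
 */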
1031 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
1036 struct drm_i915_gem_userptr userptr;
1038 pgsz = sysconf(_SC_PAGESIZE);
1041 ret = posix_memalign(&ptr, pgsz, pgsz);
1043 DBG("Failed to get a page (%ld) for userptr detection!\n",
1049 userptr.user_ptr = (__u64)(unsigned long)ptr;
1050 userptr.user_size = pgsz;
1053 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
1055 if (errno == ENODEV && userptr.flags == 0) {
1056 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
1063 /* We don't release the userptr bo here as we want to keep the
1064 * kernel mm tracking alive for our lifetime. The first time we
1065 * create a userptr object the kernel has to install an mmu_notifier
1066 * which is a heavyweight operation (e.g. it requires taking all
1067 * mm_locks and stop_machine()).
1070 bufmgr_gem->userptr_active.ptr = ptr;
1071 bufmgr_gem->userptr_active.handle = userptr.handle;
1076 static drm_intel_bo *
1077 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1080 uint32_t tiling_mode,
1083 unsigned long flags)
1085 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1086 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1088 bufmgr->bo_alloc_userptr = NULL;
1090 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1091 tiling_mode, stride, size, flags);
1095 * Returns a drm_intel_bo wrapping the given buffer object handle.
1097 * This can be used when one application needs to pass a buffer object to another.
1101 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1103 unsigned int handle)
1105 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1106 drm_intel_bo_gem *bo_gem;
1108 struct drm_gem_open open_arg;
1109 struct drm_i915_gem_get_tiling get_tiling;
1111 /* At the moment most applications only have a few named bos.
1112 * For instance, in a DRI client only the render buffers passed
1113 * between X and the client are named. And since X returns the
1114 * alternating names for the front/back buffer a linear search
1115 * provides a sufficiently fast match.
1117 pthread_mutex_lock(&bufmgr_gem->lock);
1118 HASH_FIND(name_hh, bufmgr_gem->name_table,
1119 &handle, sizeof(handle), bo_gem);
1121 drm_intel_gem_bo_reference(&bo_gem->bo);
1126 open_arg.name = handle;
1127 ret = drmIoctl(bufmgr_gem->fd,
1131 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1132 name, handle, strerror(errno));
1136 /* Now see if someone has used a prime handle to get this
1137 * object from the kernel before by looking through the list
1138 * again for a matching gem_handle
1140 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
1141 &open_arg.handle, sizeof(open_arg.handle), bo_gem);
1143 drm_intel_gem_bo_reference(&bo_gem->bo);
1147 bo_gem = calloc(1, sizeof(*bo_gem));
1151 atomic_set(&bo_gem->refcount, 1);
1152 DRMINITLISTHEAD(&bo_gem->vma_list);
1154 bo_gem->bo.size = open_arg.size;
1155 bo_gem->bo.offset = 0;
1156 bo_gem->bo.offset64 = 0;
1157 bo_gem->bo.virtual = NULL;
1158 bo_gem->bo.bufmgr = bufmgr;
1159 bo_gem->name = name;
1160 bo_gem->validate_index = -1;
1161 bo_gem->gem_handle = open_arg.handle;
1162 bo_gem->bo.handle = open_arg.handle;
1163 bo_gem->global_name = handle;
1164 bo_gem->reusable = false;
1165 bo_gem->use_48b_address_range = false;
1167 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1168 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
1169 HASH_ADD(name_hh, bufmgr_gem->name_table,
1170 global_name, sizeof(bo_gem->global_name), bo_gem);
1172 memclear(get_tiling);
1173 get_tiling.handle = bo_gem->gem_handle;
1174 ret = drmIoctl(bufmgr_gem->fd,
1175 DRM_IOCTL_I915_GEM_GET_TILING,
1180 bo_gem->tiling_mode = get_tiling.tiling_mode;
1181 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1182 /* XXX stride is unknown */
1183 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1184 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1187 pthread_mutex_unlock(&bufmgr_gem->lock);
1191 drm_intel_gem_bo_free(&bo_gem->bo);
1192 pthread_mutex_unlock(&bufmgr_gem->lock);
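/**
 * Releases a BO: tears down any cached CPU, WC and GTT mappings, removes
 * the object from the name and handle hash tables, and closes the GEM
 * handle with DRM_IOCTL_GEM_CLOSE.
 */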
1197 drm_intel_gem_bo_free(drm_intel_bo *bo)
1199 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1200 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1201 struct drm_gem_close close;
1204 DRMLISTDEL(&bo_gem->vma_list);
1205 if (bo_gem->mem_virtual) {
1206 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1207 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1208 bufmgr_gem->vma_count--;
1210 if (bo_gem->wc_virtual) {
1211 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1212 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1213 bufmgr_gem->vma_count--;
1215 if (bo_gem->gtt_virtual) {
1216 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1217 bufmgr_gem->vma_count--;
1220 if (bo_gem->global_name)
1221 HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1222 HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1224 /* Close this object */
1226 close.handle = bo_gem->gem_handle;
1227 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1229 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1230 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1236 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1239 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1241 if (bo_gem->mem_virtual)
1242 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1244 if (bo_gem->wc_virtual)
1245 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1247 if (bo_gem->gtt_virtual)
1248 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1252 /** Frees all cached buffers significantly older than @time. */
1254 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1258 if (bufmgr_gem->time == time)
1261 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1262 struct drm_intel_gem_bo_bucket *bucket =
1263 &bufmgr_gem->cache_bucket[i];
1265 while (!DRMLISTEMPTY(&bucket->head)) {
1266 drm_intel_bo_gem *bo_gem;
1268 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1269 bucket->head.next, head);
1270 if (time - bo_gem->free_time <= 1)
1273 DRMLISTDEL(&bo_gem->head);
1275 drm_intel_gem_bo_free(&bo_gem->bo);
1279 bufmgr_gem->time = time;
1282 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1286 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1287 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1289 if (bufmgr_gem->vma_max < 0)
1292 /* We may need to evict a few entries in order to create new mmaps */
1293 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1297 while (bufmgr_gem->vma_count > limit) {
1298 drm_intel_bo_gem *bo_gem;
1300 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1301 bufmgr_gem->vma_cache.next,
1303 assert(bo_gem->map_count == 0);
1304 DRMLISTDELINIT(&bo_gem->vma_list);
1306 if (bo_gem->mem_virtual) {
1307 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1308 bo_gem->mem_virtual = NULL;
1309 bufmgr_gem->vma_count--;
1311 if (bo_gem->wc_virtual) {
1312 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1313 bo_gem->wc_virtual = NULL;
1314 bufmgr_gem->vma_count--;
1316 if (bo_gem->gtt_virtual) {
1317 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1318 bo_gem->gtt_virtual = NULL;
1319 bufmgr_gem->vma_count--;
1324 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1325 drm_intel_bo_gem *bo_gem)
1327 bufmgr_gem->vma_open--;
1328 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1329 if (bo_gem->mem_virtual)
1330 bufmgr_gem->vma_count++;
1331 if (bo_gem->wc_virtual)
1332 bufmgr_gem->vma_count++;
1333 if (bo_gem->gtt_virtual)
1334 bufmgr_gem->vma_count++;
1335 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1338 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1339 drm_intel_bo_gem *bo_gem)
1341 bufmgr_gem->vma_open++;
1342 DRMLISTDEL(&bo_gem->vma_list);
1343 if (bo_gem->mem_virtual)
1344 bufmgr_gem->vma_count--;
1345 if (bo_gem->wc_virtual)
1346 bufmgr_gem->vma_count--;
1347 if (bo_gem->gtt_virtual)
1348 bufmgr_gem->vma_count--;
1349 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1353 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1355 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1356 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1357 struct drm_intel_gem_bo_bucket *bucket;
1360 /* Unreference all the target buffers */
1361 for (i = 0; i < bo_gem->reloc_count; i++) {
1362 if (bo_gem->reloc_target_info[i].bo != bo) {
1363 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1364 reloc_target_info[i].bo,
1368 for (i = 0; i < bo_gem->softpin_target_count; i++)
1369 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1371 bo_gem->reloc_count = 0;
1372 bo_gem->used_as_reloc_target = false;
1373 bo_gem->softpin_target_count = 0;
1375 DBG("bo_unreference final: %d (%s)\n",
1376 bo_gem->gem_handle, bo_gem->name);
1378 /* release memory associated with this object */
1379 if (bo_gem->reloc_target_info) {
1380 free(bo_gem->reloc_target_info);
1381 bo_gem->reloc_target_info = NULL;
1383 if (bo_gem->relocs) {
1384 free(bo_gem->relocs);
1385 bo_gem->relocs = NULL;
1387 if (bo_gem->softpin_target) {
1388 free(bo_gem->softpin_target);
1389 bo_gem->softpin_target = NULL;
1390 bo_gem->softpin_target_size = 0;
1393 /* Clear any left-over mappings */
1394 if (bo_gem->map_count) {
1395 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1396 bo_gem->map_count = 0;
1397 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1398 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1401 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1402 /* Put the buffer into our internal cache for reuse if we can. */
1403 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1404 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1405 I915_MADV_DONTNEED)) {
1406 bo_gem->free_time = time;
1408 bo_gem->name = NULL;
1409 bo_gem->validate_index = -1;
1411 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1413 drm_intel_gem_bo_free(bo);
1417 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1420 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1422 assert(atomic_read(&bo_gem->refcount) > 0);
1423 if (atomic_dec_and_test(&bo_gem->refcount))
1424 drm_intel_gem_bo_unreference_final(bo, time);
1427 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1429 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1431 assert(atomic_read(&bo_gem->refcount) > 0);
1433 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1434 drm_intel_bufmgr_gem *bufmgr_gem =
1435 (drm_intel_bufmgr_gem *) bo->bufmgr;
1436 struct timespec time;
1438 clock_gettime(CLOCK_MONOTONIC, &time);
1440 pthread_mutex_lock(&bufmgr_gem->lock);
1442 if (atomic_dec_and_test(&bo_gem->refcount)) {
1443 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1444 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1447 pthread_mutex_unlock(&bufmgr_gem->lock);
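/*
 * Example usage (illustrative sketch, not taken from this file; "data" and
 * "size" are placeholders):
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memcpy(bo->virtual, data, size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */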
1451 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1453 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1454 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1455 struct drm_i915_gem_set_domain set_domain;
1458 if (bo_gem->is_userptr) {
1459 /* Return the same user ptr */
1460 bo->virtual = bo_gem->user_virtual;
1464 pthread_mutex_lock(&bufmgr_gem->lock);
1466 if (bo_gem->map_count++ == 0)
1467 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1469 if (!bo_gem->mem_virtual) {
1470 struct drm_i915_gem_mmap mmap_arg;
1472 DBG("bo_map: %d (%s), map_count=%d\n",
1473 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1476 mmap_arg.handle = bo_gem->gem_handle;
1477 mmap_arg.size = bo->size;
1478 ret = drmIoctl(bufmgr_gem->fd,
1479 DRM_IOCTL_I915_GEM_MMAP,
1483 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1484 __FILE__, __LINE__, bo_gem->gem_handle,
1485 bo_gem->name, strerror(errno));
1486 if (--bo_gem->map_count == 0)
1487 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1488 pthread_mutex_unlock(&bufmgr_gem->lock);
1491 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1492 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1494 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1495 bo_gem->mem_virtual);
1496 bo->virtual = bo_gem->mem_virtual;
1498 memclear(set_domain);
1499 set_domain.handle = bo_gem->gem_handle;
1500 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1502 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1504 set_domain.write_domain = 0;
1505 ret = drmIoctl(bufmgr_gem->fd,
1506 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1509 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1510 __FILE__, __LINE__, bo_gem->gem_handle,
1515 bo_gem->mapped_cpu_write = true;
1517 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1518 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1519 pthread_mutex_unlock(&bufmgr_gem->lock);
1525 map_gtt(drm_intel_bo *bo)
1527 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1528 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1531 if (bo_gem->is_userptr)
1534 if (bo_gem->map_count++ == 0)
1535 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1537 /* Get a mapping of the buffer if we haven't before. */
1538 if (bo_gem->gtt_virtual == NULL) {
1539 struct drm_i915_gem_mmap_gtt mmap_arg;
1541 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1542 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1545 mmap_arg.handle = bo_gem->gem_handle;
1547 /* Get the fake offset back... */
1548 ret = drmIoctl(bufmgr_gem->fd,
1549 DRM_IOCTL_I915_GEM_MMAP_GTT,
1553 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1555 bo_gem->gem_handle, bo_gem->name,
1557 if (--bo_gem->map_count == 0)
1558 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1563 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1564 MAP_SHARED, bufmgr_gem->fd,
1566 if (bo_gem->gtt_virtual == MAP_FAILED) {
1567 bo_gem->gtt_virtual = NULL;
1569 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1571 bo_gem->gem_handle, bo_gem->name,
1573 if (--bo_gem->map_count == 0)
1574 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1579 bo->virtual = bo_gem->gtt_virtual;
1581 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1582 bo_gem->gtt_virtual);
1588 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1590 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1591 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1592 struct drm_i915_gem_set_domain set_domain;
1595 pthread_mutex_lock(&bufmgr_gem->lock);
1599 pthread_mutex_unlock(&bufmgr_gem->lock);
1603 /* Now move it to the GTT domain so that the GPU and CPU
1604 * caches are flushed and the GPU isn't actively using the buffer.
1607 * The pagefault handler does this domain change for us when
1608 * it has unbound the BO from the GTT, but it's up to us to
1609 * tell it when we're about to use things if we had done
1610 * rendering and it still happens to be bound to the GTT.
1612 memclear(set_domain);
1613 set_domain.handle = bo_gem->gem_handle;
1614 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1615 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1616 ret = drmIoctl(bufmgr_gem->fd,
1617 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1620 DBG("%s:%d: Error setting domain %d: %s\n",
1621 __FILE__, __LINE__, bo_gem->gem_handle,
1625 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1626 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1627 pthread_mutex_unlock(&bufmgr_gem->lock);
1633 * Performs a mapping of the buffer object like the normal GTT
1634 * mapping, but avoids waiting for the GPU to be done reading from or
1635 * rendering to the buffer.
1637 * This is used in the implementation of GL_ARB_map_buffer_range: The
1638 * user asks to create a buffer, then does a mapping, fills some
1639 * space, runs a drawing command, then asks to map it again without
1640 * synchronizing because it guarantees that it won't write over the
1641 * data that the GPU is busy using (or, more specifically, that if it
1642 * does write over the data, it acknowledges that rendering is undefined).
1647 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1649 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1650 #ifdef HAVE_VALGRIND
1651 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1655 /* If the CPU cache isn't coherent with the GTT, then use a
1656 * regular synchronized mapping. The problem is that we don't
1657 * track where the buffer was last used on the CPU side in
1658 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1659 * we would potentially corrupt the buffer even when the user
1660 * does reasonable things.
1662 if (!bufmgr_gem->has_llc)
1663 return drm_intel_gem_bo_map_gtt(bo);
1665 pthread_mutex_lock(&bufmgr_gem->lock);
1669 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1670 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1673 pthread_mutex_unlock(&bufmgr_gem->lock);
1678 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1680 drm_intel_bufmgr_gem *bufmgr_gem;
1681 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1687 if (bo_gem->is_userptr)
1690 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1692 pthread_mutex_lock(&bufmgr_gem->lock);
1694 if (bo_gem->map_count <= 0) {
1695 DBG("attempted to unmap an unmapped bo\n");
1696 pthread_mutex_unlock(&bufmgr_gem->lock);
1697 /* Preserve the old behaviour of just treating this as a
1698 * no-op rather than reporting the error.
1703 if (bo_gem->mapped_cpu_write) {
1704 struct drm_i915_gem_sw_finish sw_finish;
1706 /* Cause a flush to happen if the buffer's pinned for
1707 * scanout, so the results show up in a timely manner.
1708 * Unlike GTT set domains, this only does work if the
1709 * buffer should be scanout-related.
1711 memclear(sw_finish);
1712 sw_finish.handle = bo_gem->gem_handle;
1713 ret = drmIoctl(bufmgr_gem->fd,
1714 DRM_IOCTL_I915_GEM_SW_FINISH,
1716 ret = ret == -1 ? -errno : 0;
1718 bo_gem->mapped_cpu_write = false;
1721 /* We need to unmap after every invocation, as we cannot track
1722 * an open vma for every bo: that would exhaust the system
1723 * limits and cause later failures.
1725 if (--bo_gem->map_count == 0) {
1726 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1727 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1730 pthread_mutex_unlock(&bufmgr_gem->lock);
1736 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1738 return drm_intel_gem_bo_unmap(bo);
1742 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1743 unsigned long size, const void *data)
1745 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1746 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1747 struct drm_i915_gem_pwrite pwrite;
1750 if (bo_gem->is_userptr)
1754 pwrite.handle = bo_gem->gem_handle;
1755 pwrite.offset = offset;
1757 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1758 ret = drmIoctl(bufmgr_gem->fd,
1759 DRM_IOCTL_I915_GEM_PWRITE,
1763 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1764 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1765 (int)size, strerror(errno));
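/*
 * Example usage of the pwrite path above (illustrative sketch, not taken
 * from this file): writing a single dword without mapping the buffer:
 *
 *	uint32_t value = 0;
 *	drm_intel_bo_subdata(bo, 0, sizeof(value), &value);
 */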
1772 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1774 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1775 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1778 memclear(get_pipe_from_crtc_id);
1779 get_pipe_from_crtc_id.crtc_id = crtc_id;
1780 ret = drmIoctl(bufmgr_gem->fd,
1781 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1782 &get_pipe_from_crtc_id);
1784 /* We return -1 here to signal that we don't
1785 * know which pipe is associated with this crtc.
1786 * This lets the caller know that this information
1787 * isn't available; using the wrong pipe for
1788 * vblank waiting can cause the chipset to lock up
1793 return get_pipe_from_crtc_id.pipe;
1797 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1798 unsigned long size, void *data)
1800 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1801 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1802 struct drm_i915_gem_pread pread;
1805 if (bo_gem->is_userptr)
1809 pread.handle = bo_gem->gem_handle;
1810 pread.offset = offset;
1812 pread.data_ptr = (uint64_t) (uintptr_t) data;
1813 ret = drmIoctl(bufmgr_gem->fd,
1814 DRM_IOCTL_I915_GEM_PREAD,
1818 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1819 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1820 (int)size, strerror(errno));
1826 /** Waits for all GPU rendering with the object to have completed. */
1828 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1830 drm_intel_gem_bo_start_gtt_access(bo, 1);
1834 * Waits on a BO for the given amount of time.
1836 * @bo: buffer object to wait for
1837 * @timeout_ns: amount of time to wait in nanoseconds.
1838 * If value is less than 0, an infinite wait will occur.
1840 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1841 * object has completed within the allotted time. Otherwise some negative return
1842 * value describes the error. Of particular interest is -ETIME when the wait has
1843 * failed to yield the desired result.
1845 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1846 * the operation to give up after a certain amount of time. Another subtle
1847 * difference is in the internal locking semantics (this variant does
1848 * not hold the lock for the duration of the wait). This makes the wait subject
1849 * to a larger userspace race window.
1851 * The implementation shall wait until the object is no longer actively
1852 * referenced within a batch buffer at the time of the call. The wait will
1853 * not guarantee that the buffer will not be re-issued via another thread or a flinked
1854 * handle. Userspace must make sure this race does not occur if such precision is important for the application.
1857 * Note that some kernels have broken the infinite wait for negative values
1858 * promise; upgrade to the latest stable kernel if this is the case.
1861 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1863 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1864 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1865 struct drm_i915_gem_wait wait;
1868 if (!bufmgr_gem->has_wait_timeout) {
1869 DBG("%s:%d: Timed wait is not supported. Falling back to "
1870 "infinite wait\n", __FILE__, __LINE__);
1872 drm_intel_gem_bo_wait_rendering(bo);
1875 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1880 wait.bo_handle = bo_gem->gem_handle;
1881 wait.timeout_ns = timeout_ns;
1882 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
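/*
 * Example usage (illustrative sketch, not taken from this file): waiting up
 * to one millisecond for the GPU to finish with a buffer before touching it:
 *
 *	if (drm_intel_gem_bo_wait(bo, 1000000) == -ETIME) {
 *		... buffer is still busy, back off ...
 *	}
 */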
1890 * Sets the object to the GTT read and possibly write domain, used by the X
1891 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1893 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1894 * can do tiled pixmaps this way.
1897 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1899 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1900 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1901 struct drm_i915_gem_set_domain set_domain;
1904 memclear(set_domain);
1905 set_domain.handle = bo_gem->gem_handle;
1906 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1907 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1908 ret = drmIoctl(bufmgr_gem->fd,
1909 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1912 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1913 __FILE__, __LINE__, bo_gem->gem_handle,
1914 set_domain.read_domains, set_domain.write_domain,
1920 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1922 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1923 struct drm_gem_close close_bo;
1926 free(bufmgr_gem->exec2_objects);
1927 free(bufmgr_gem->exec_objects);
1928 free(bufmgr_gem->exec_bos);
1930 pthread_mutex_destroy(&bufmgr_gem->lock);
1932 /* Free any cached buffer objects we were going to reuse */
1933 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1934 struct drm_intel_gem_bo_bucket *bucket =
1935 &bufmgr_gem->cache_bucket[i];
1936 drm_intel_bo_gem *bo_gem;
1938 while (!DRMLISTEMPTY(&bucket->head)) {
1939 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1940 bucket->head.next, head);
1941 DRMLISTDEL(&bo_gem->head);
1943 drm_intel_gem_bo_free(&bo_gem->bo);
1947 /* Release userptr bo kept hanging around for optimisation. */
1948 if (bufmgr_gem->userptr_active.ptr) {
1950 close_bo.handle = bufmgr_gem->userptr_active.handle;
1951 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1952 free(bufmgr_gem->userptr_active.ptr);
1955 "Failed to release test userptr object! (%d) "
1956 "i915 kernel driver may not be sane!\n", errno);
1963 * Adds the target buffer to the validation list and adds the relocation
1964 * to the reloc_buffer's relocation list.
1966 * The relocation entry at the given offset must already contain the
1967 * precomputed relocation value, because the kernel will optimize out
1968 * the relocation entry write when the buffer hasn't moved from the
1969 * last known offset in target_bo.
1972 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1973 drm_intel_bo *target_bo, uint32_t target_offset,
1974 uint32_t read_domains, uint32_t write_domain,
1977 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1978 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1979 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1980 bool fenced_command;
1982 if (bo_gem->has_error)
1985 if (target_bo_gem->has_error) {
1986 bo_gem->has_error = true;
1990 /* We never use HW fences for rendering on 965+ */
1991 if (bufmgr_gem->gen >= 4)
1994 fenced_command = need_fence;
1995 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1998 /* Create a new relocation list if needed */
1999 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
2002 /* Check overflow */
2003 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
2006 assert(offset <= bo->size - 4);
2007 assert((write_domain & (write_domain - 1)) == 0);
2009 /* An object needing a fence is a tiled buffer, so it won't have
2010 * relocs to other buffers.
2013 assert(target_bo_gem->reloc_count == 0);
2014 target_bo_gem->reloc_tree_fences = 1;
2017 /* Make sure that we're not adding a reloc to something whose size has
2018 * already been accounted for.
2020 assert(!bo_gem->used_as_reloc_target);
2021 if (target_bo_gem != bo_gem) {
2022 target_bo_gem->used_as_reloc_target = true;
2023 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
2024 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2027 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2028 if (target_bo != bo)
2029 drm_intel_gem_bo_reference(target_bo);
2031 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
2032 DRM_INTEL_RELOC_FENCE;
2034 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
2036 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2037 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2038 bo_gem->relocs[bo_gem->reloc_count].target_handle =
2039 target_bo_gem->gem_handle;
2040 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2041 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2042 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
2043 bo_gem->reloc_count++;
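/**
 * Marks whether the BO may be placed anywhere in the full 48-bit PPGTT
 * address space on gen8+. At exec time this translates into the
 * EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag; by default buffers stay within a
 * 32-bit range.
 */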
2049 drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2051 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2052 bo_gem->use_48b_address_range = enable;
2056 drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2058 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2059 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2060 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2061 if (bo_gem->has_error)
2064 if (target_bo_gem->has_error) {
2065 bo_gem->has_error = true;
2069 if (!target_bo_gem->is_softpin)
2071 if (target_bo_gem == bo_gem)
2074 if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2075 int new_size = bo_gem->softpin_target_size * 2;
2077 new_size = bufmgr_gem->max_relocs;
2079 bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
2080 sizeof(drm_intel_bo *));
2081 if (!bo_gem->softpin_target)
2084 bo_gem->softpin_target_size = new_size;
2086 bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2087 drm_intel_gem_bo_reference(target_bo);
2088 bo_gem->softpin_target_count++;
2094 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2095 drm_intel_bo *target_bo, uint32_t target_offset,
2096 uint32_t read_domains, uint32_t write_domain)
2098 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2099 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
2101 if (target_bo_gem->is_softpin)
2102 return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2104 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2105 read_domains, write_domain,
2106 !bufmgr_gem->fenced_relocs);
2110 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
2111 drm_intel_bo *target_bo,
2112 uint32_t target_offset,
2113 uint32_t read_domains, uint32_t write_domain)
2115 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2116 read_domains, write_domain, true);
2120 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2122 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2124 return bo_gem->reloc_count;
2128 * Removes existing relocation entries in the BO after "start".
2130 * This allows a user to avoid a two-step process for state setup with
2131 * counting up all the buffer objects and doing a
2132 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2133 * relocations for the state setup. Instead, save the state of the
2134 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
2135 * state, and then check if it still fits in the aperture.
2137 * Any further drm_intel_bufmgr_check_aperture_space() queries
2138 * involving this buffer in the tree are undefined after this call.
2140 * This also removes all softpinned targets being referenced by the BO.
2143 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2145 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2146 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2148 struct timespec time;
2150 clock_gettime(CLOCK_MONOTONIC, &time);
2152 assert(bo_gem->reloc_count >= start);
2154 /* Unreference the cleared target buffers */
2155 pthread_mutex_lock(&bufmgr_gem->lock);
2157 for (i = start; i < bo_gem->reloc_count; i++) {
2158 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2159 if (&target_bo_gem->bo != bo) {
2160 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2161 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2165 bo_gem->reloc_count = start;
2167 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2168 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2169 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2171 bo_gem->softpin_target_count = 0;
2173 pthread_mutex_unlock(&bufmgr_gem->lock);
2178 * Walk the tree of relocations rooted at BO and accumulate the list of
2179 * validations to be performed and update the relocation buffers with
2180 * index values into the validation list.
2183 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2185 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2188 if (bo_gem->relocs == NULL)
2191 for (i = 0; i < bo_gem->reloc_count; i++) {
2192 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2194 if (target_bo == bo)
2197 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2199 /* Continue walking the tree depth-first. */
2200 drm_intel_gem_bo_process_reloc(target_bo);
2202 /* Add the target to the validate list */
2203 drm_intel_add_validate_buffer(target_bo);
2208 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2210 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2213 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
2216 for (i = 0; i < bo_gem->reloc_count; i++) {
2217 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2220 if (target_bo == bo)
2223 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2225 /* Continue walking the tree depth-first. */
2226 drm_intel_gem_bo_process_reloc2(target_bo);
2228 need_fence = (bo_gem->reloc_target_info[i].flags &
2229 DRM_INTEL_RELOC_FENCE);
2231 /* Add the target to the validate list */
2232 drm_intel_add_validate_buffer2(target_bo, need_fence);
2235 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2236 drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2238 if (target_bo == bo)
2241 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2242 drm_intel_gem_bo_process_reloc2(target_bo);
2243 drm_intel_add_validate_buffer2(target_bo, false);
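/**
 * After execbuffer returns, the kernel has written the final GTT offset of
 * each object back into the exec list; copy those offsets into the
 * corresponding drm_intel_bo so later relocations use the new presumed
 * offsets.
 */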
2249 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2253 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2254 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2255 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2257 /* Update the buffer offset */
2258 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2259 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2260 bo_gem->gem_handle, bo_gem->name,
2261 upper_32_bits(bo->offset64),
2262 lower_32_bits(bo->offset64),
2263 upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2264 lower_32_bits(bufmgr_gem->exec_objects[i].offset));
2265 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2266 bo->offset = bufmgr_gem->exec_objects[i].offset;
2272 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2276 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2277 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2278 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2280 /* Update the buffer offset */
2281 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2282 /* If we see a softpinned object here, it means that the kernel
2283 * has relocated our object, which indicates a programming error. */
2285 assert(!bo_gem->is_softpin);
2286 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2287 bo_gem->gem_handle, bo_gem->name,
2288 upper_32_bits(bo->offset64),
2289 lower_32_bits(bo->offset64),
2290 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2291 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
2292 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2293 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2299 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2300 int x1, int y1, int width, int height,
2301 enum aub_dump_bmp_format format,
2302 int pitch, int offset)
2307 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2308 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2310 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2311 struct drm_i915_gem_execbuffer execbuf;
2314 if (to_bo_gem(bo)->has_error)
2317 pthread_mutex_lock(&bufmgr_gem->lock);
2318 /* Update indices and set up the validate list. */
2319 drm_intel_gem_bo_process_reloc(bo);
2321 /* Add the batch buffer to the validation list. There are no
2322 * relocations pointing to it.
2324 drm_intel_add_validate_buffer(bo);
2327 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2328 execbuf.buffer_count = bufmgr_gem->exec_count;
2329 execbuf.batch_start_offset = 0;
2330 execbuf.batch_len = used;
2331 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2332 execbuf.num_cliprects = num_cliprects;
2336 ret = drmIoctl(bufmgr_gem->fd,
2337 DRM_IOCTL_I915_GEM_EXECBUFFER,
2341 if (errno == ENOSPC) {
2342 DBG("Execbuffer fails to pin. "
2343 "Estimate: %u. Actual: %u. Available: %u\n",
2344 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2347 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2350 (unsigned int)bufmgr_gem->gtt_size);
2353 drm_intel_update_buffer_offsets(bufmgr_gem);
2355 if (bufmgr_gem->bufmgr.debug)
2356 drm_intel_gem_dump_validation_list(bufmgr_gem);
2358 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2359 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2361 bo_gem->idle = false;
2363 /* Disconnect the buffer from the validate list */
2364 bo_gem->validate_index = -1;
2365 bufmgr_gem->exec_bos[i] = NULL;
2367 bufmgr_gem->exec_count = 0;
2368 pthread_mutex_unlock(&bufmgr_gem->lock);
2374 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2375 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2378 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2379 struct drm_i915_gem_execbuffer2 execbuf;
2383 if (to_bo_gem(bo)->has_error)
2386 switch (flags & 0x7) {
2390 if (!bufmgr_gem->has_blt)
2394 if (!bufmgr_gem->has_bsd)
2397 case I915_EXEC_VEBOX:
2398 if (!bufmgr_gem->has_vebox)
2401 case I915_EXEC_RENDER:
2402 case I915_EXEC_DEFAULT:
2406 pthread_mutex_lock(&bufmgr_gem->lock);
2407 /* Update indices and set up the validate list. */
2408 drm_intel_gem_bo_process_reloc2(bo);
2410 /* Add the batch buffer to the validation list. There are no relocations
2413 drm_intel_add_validate_buffer2(bo, 0);
2416 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2417 execbuf.buffer_count = bufmgr_gem->exec_count;
2418 execbuf.batch_start_offset = 0;
2419 execbuf.batch_len = used;
2420 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2421 execbuf.num_cliprects = num_cliprects;
2424 execbuf.flags = flags;
2426 i915_execbuffer2_set_context_id(execbuf, 0);
2428 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2431 if (bufmgr_gem->no_exec)
2432 goto skip_execution;
2434 ret = drmIoctl(bufmgr_gem->fd,
2435 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2439 if (ret == -ENOSPC) {
2440 DBG("Execbuffer fails to pin. "
2441 "Estimate: %u. Actual: %u. Available: %u\n",
2442 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2443 bufmgr_gem->exec_count),
2444 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2445 bufmgr_gem->exec_count),
2446 (unsigned int) bufmgr_gem->gtt_size);
2449 drm_intel_update_buffer_offsets2(bufmgr_gem);
2452 if (bufmgr_gem->bufmgr.debug)
2453 drm_intel_gem_dump_validation_list(bufmgr_gem);
2455 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2456 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2458 bo_gem->idle = false;
2460 /* Disconnect the buffer from the validate list */
2461 bo_gem->validate_index = -1;
2462 bufmgr_gem->exec_bos[i] = NULL;
2464 bufmgr_gem->exec_count = 0;
2465 pthread_mutex_unlock(&bufmgr_gem->lock);
2471 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2472 drm_clip_rect_t *cliprects, int num_cliprects,
2475 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2480 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2481 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2484 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2489 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2490 int used, unsigned int flags)
2492 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
2496 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2498 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2499 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2500 struct drm_i915_gem_pin pin;
2504 pin.handle = bo_gem->gem_handle;
2505 pin.alignment = alignment;
2507 ret = drmIoctl(bufmgr_gem->fd,
2508 DRM_IOCTL_I915_GEM_PIN,
2513 bo->offset64 = pin.offset;
2514 bo->offset = pin.offset;
2519 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2521 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2522 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2523 struct drm_i915_gem_unpin unpin;
2527 unpin.handle = bo_gem->gem_handle;
2529 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2537 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2538 uint32_t tiling_mode,
2541 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2542 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2543 struct drm_i915_gem_set_tiling set_tiling;
2546 if (bo_gem->global_name == 0 &&
2547 tiling_mode == bo_gem->tiling_mode &&
2548 stride == bo_gem->stride)
2551 memset(&set_tiling, 0, sizeof(set_tiling));
2553 /* set_tiling is slightly broken and overwrites the
2554 * input on the error path, so we have to open-code drmIoctl. */
2557 set_tiling.handle = bo_gem->gem_handle;
2558 set_tiling.tiling_mode = tiling_mode;
2559 set_tiling.stride = stride;
2561 ret = ioctl(bufmgr_gem->fd,
2562 DRM_IOCTL_I915_GEM_SET_TILING,
2564 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2568 bo_gem->tiling_mode = set_tiling.tiling_mode;
2569 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2570 bo_gem->stride = set_tiling.stride;
2575 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2578 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2579 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2582 /* Tiling with userptr surfaces is not supported
2583 * on all hardware, so refuse it for the time being. */
2585 if (bo_gem->is_userptr)
2588 /* Linear buffers have no stride. By ensuring that we only ever use
2589 * stride 0 with linear buffers, we simplify our code.
2591 if (*tiling_mode == I915_TILING_NONE)
2594 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2596 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2598 *tiling_mode = bo_gem->tiling_mode;
2603 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2604 uint32_t * swizzle_mode)
2606 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2608 *tiling_mode = bo_gem->tiling_mode;
2609 *swizzle_mode = bo_gem->swizzle_mode;
2614 drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2616 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2618 bo_gem->is_softpin = true;
2619 bo->offset64 = offset;
2620 bo->offset = offset;
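/*
 * Illustrative softpin sketch (assumptions: the kernel reported
 * I915_PARAM_HAS_EXEC_SOFTPIN, and my_gtt_offset is a valid GPU address
 * that the caller manages itself; neither is provided by this file):
 *
 *	drm_intel_bo_use_48b_address_range(bo, 1);
 *	drm_intel_bo_set_softpin_offset(bo, my_gtt_offset);
 */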
2625 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2627 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2630 drm_intel_bo_gem *bo_gem;
2631 struct drm_i915_gem_get_tiling get_tiling;
2633 pthread_mutex_lock(&bufmgr_gem->lock);
2634 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2636 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2637 pthread_mutex_unlock(&bufmgr_gem->lock);
2642 * See if the kernel has already returned this buffer to us. Just as
2643 * for named buffers, we must not create two bo's pointing at the same kernel object. */
2646 HASH_FIND(name_hh, bufmgr_gem->name_table,
2647 &handle, sizeof(handle), bo_gem);
2649 drm_intel_gem_bo_reference(&bo_gem->bo);
2653 bo_gem = calloc(1, sizeof(*bo_gem));
2657 atomic_set(&bo_gem->refcount, 1);
2658 DRMINITLISTHEAD(&bo_gem->vma_list);
2660 /* Determine size of bo. The fd-to-handle ioctl really should
2661 * return the size, but it doesn't. If we have kernel 3.12 or
2662 * later, we can lseek on the prime fd to get the size. Older
2663 * kernels will just fail, in which case we fall back to the
2664 * caller-provided size (an estimate or a guess). */
2665 ret = lseek(prime_fd, 0, SEEK_END);
2667 bo_gem->bo.size = ret;
2669 bo_gem->bo.size = size;
2671 bo_gem->bo.handle = handle;
2672 bo_gem->bo.bufmgr = bufmgr;
2674 bo_gem->gem_handle = handle;
2675 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2676 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2678 bo_gem->name = "prime";
2679 bo_gem->validate_index = -1;
2680 bo_gem->reloc_tree_fences = 0;
2681 bo_gem->used_as_reloc_target = false;
2682 bo_gem->has_error = false;
2683 bo_gem->reusable = false;
2684 bo_gem->use_48b_address_range = false;
2686 memclear(get_tiling);
2687 get_tiling.handle = bo_gem->gem_handle;
2688 if (drmIoctl(bufmgr_gem->fd,
2689 DRM_IOCTL_I915_GEM_GET_TILING,
2693 bo_gem->tiling_mode = get_tiling.tiling_mode;
2694 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2695 /* XXX stride is unknown */
2696 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2699 pthread_mutex_unlock(&bufmgr_gem->lock);
2703 drm_intel_gem_bo_free(&bo_gem->bo);
2704 pthread_mutex_unlock(&bufmgr_gem->lock);
2709 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2711 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2712 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2714 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2715 DRM_CLOEXEC, prime_fd) != 0)
2718 bo_gem->reusable = false;
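/*
 * Illustrative dma-buf sharing sketch (the fd transport between processes is
 * assumed, not shown): export a bo as a prime fd and re-import it elsewhere.
 *
 *	int fd;
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		// ... pass fd to the other process, e.g. over a unix socket ...
 *	}
 *
 *	// In the importing process:
 *	drm_intel_bo *imported =
 *		drm_intel_bo_gem_create_from_prime(bufmgr, fd, size);
 */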
2724 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2726 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2727 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2729 if (!bo_gem->global_name) {
2730 struct drm_gem_flink flink;
2733 flink.handle = bo_gem->gem_handle;
2734 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2737 pthread_mutex_lock(&bufmgr_gem->lock);
2738 if (!bo_gem->global_name) {
2739 HASH_ADD(name_hh, bufmgr_gem->name_table,
2740 global_name, sizeof(bo_gem->global_name),
2742 bo_gem->global_name = flink.name;
2743 bo_gem->reusable = false;
2745 pthread_mutex_unlock(&bufmgr_gem->lock);
2748 *name = bo_gem->global_name;
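/*
 * Illustrative flink sketch: publish a global (flink) name and reopen the
 * same object from another fd or process with
 * drm_intel_bo_gem_create_from_name(), declared in intel_bufmgr.h.
 *
 *	uint32_t name;
 *	if (drm_intel_gem_bo_flink(bo, &name) == 0) {
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *	}
 */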
2753 * Enables unlimited caching of buffer objects for reuse.
2755 * This is potentially very memory expensive, as the cache at each bucket
2756 * size is only bounded by how many buffers of that size we've managed to have
2757 * in flight at once.
2760 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2762 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2764 bufmgr_gem->bo_reuse = true;
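/*
 * Typical initialization sketch (illustrative; the 16KB batch size is just
 * an example value, not a requirement):
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */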
2768 * Enable use of fenced reloc type.
2770 * New code should enable this to avoid unnecessary fence register
2771 * allocation. If this option is not enabled, a fence register will be
2772 * allocated for every reloc.
2775 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2777 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2779 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2780 bufmgr_gem->fenced_relocs = true;
2784 * Return the additional aperture space required by the tree of buffer objects rooted at bo. */
2788 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2790 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2794 if (bo == NULL || bo_gem->included_in_check_aperture)
2798 bo_gem->included_in_check_aperture = true;
2800 for (i = 0; i < bo_gem->reloc_count; i++)
2802 drm_intel_gem_bo_get_aperture_space(bo_gem->
2803 reloc_target_info[i].bo);
2809 * Count the number of buffers in this list that need a fence reg
2811 * If the count is greater than the number of available regs, we'll have
2812 * to ask the caller to resubmit a batch with fewer tiled buffers.
2814 * This function over-counts if the same buffer is used multiple times.
2817 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2820 unsigned int total = 0;
2822 for (i = 0; i < count; i++) {
2823 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2828 total += bo_gem->reloc_tree_fences;
2834 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2835 * for the next drm_intel_bufmgr_check_aperture_space() call.
2838 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2840 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2843 if (bo == NULL || !bo_gem->included_in_check_aperture)
2846 bo_gem->included_in_check_aperture = false;
2848 for (i = 0; i < bo_gem->reloc_count; i++)
2849 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2850 reloc_target_info[i].bo);
2854 * Return a conservative estimate for the amount of aperture required
2855 * for a collection of buffers. This may double-count some buffers.
2858 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2861 unsigned int total = 0;
2863 for (i = 0; i < count; i++) {
2864 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2866 total += bo_gem->reloc_tree_size;
2872 * Return the amount of aperture needed for a collection of buffers.
2873 * This avoids double counting any buffers, at the cost of looking
2874 * at every buffer in the set.
2877 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2880 unsigned int total = 0;
2882 for (i = 0; i < count; i++) {
2883 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2884 /* For the first buffer object in the array, we get an
2885 * accurate count back for its reloc_tree size (since nothing
2886 * had been flagged as being counted yet). We can save that
2887 * value out as a more conservative reloc_tree_size that
2888 * avoids double-counting target buffers. Since the first
2889 * buffer happens to usually be the batch buffer in our
2890 * callers, this can pull us back from doing the tree
2891 * walk on every new batch emit.
2892 */
2893 if (i == 0) {
2894 drm_intel_bo_gem *bo_gem =
2895 (drm_intel_bo_gem *) bo_array[i];
2896 bo_gem->reloc_tree_size = total;
2900 for (i = 0; i < count; i++)
2901 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2906 * Return -1 if the batchbuffer should be flushed before attempting to
2907 * emit rendering referencing the buffers pointed to by bo_array.
2909 * This is required because if we try to emit a batchbuffer with relocations
2910 * to a tree of buffers that won't simultaneously fit in the aperture,
2911 * the rendering will return an error at a point where the software is not
2912 * prepared to recover from it.
2914 * However, we also want to emit the batchbuffer significantly before we reach
2915 * the limit, as a series of batchbuffers each of which references buffers
2916 * covering almost all of the aperture means that at each emit we end up
2917 * waiting to evict a buffer from the previous rendering, and performance
2918 * becomes effectively synchronous. By emitting smaller batchbuffers, we eat
2919 * some CPU overhead but get better parallelism.
2922 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2924 drm_intel_bufmgr_gem *bufmgr_gem =
2925 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2926 unsigned int total = 0;
2927 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2930 /* Check for fence reg constraints if necessary */
2931 if (bufmgr_gem->available_fences) {
2932 total_fences = drm_intel_gem_total_fences(bo_array, count);
2933 if (total_fences > bufmgr_gem->available_fences)
2937 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2939 if (total > threshold)
2940 total = drm_intel_gem_compute_batch_space(bo_array, count);
2942 if (total > threshold) {
2943 DBG("check_space: overflowed available aperture, "
2945 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2948 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2949 (int)bufmgr_gem->gtt_size / 1024);
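/*
 * Illustrative caller-side pattern (render_bos[] and flush_batch() are
 * assumed caller-side names): flush and retry when the buffers referenced
 * by the next draw would not fit in the aperture.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(render_bos, n) != 0)
 *		flush_batch();
 */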
2955 * Disable buffer reuse for objects which are shared with the kernel
2956 * as scanout buffers
2959 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2961 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2963 bo_gem->reusable = false;
2968 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2970 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2972 return bo_gem->reusable;
2976 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2978 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2981 for (i = 0; i < bo_gem->reloc_count; i++) {
2982 if (bo_gem->reloc_target_info[i].bo == target_bo)
2984 if (bo == bo_gem->reloc_target_info[i].bo)
2986 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2991 for (i = 0; i< bo_gem->softpin_target_count; i++) {
2992 if (bo_gem->softpin_target[i] == target_bo)
2994 if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3001 /** Return true if target_bo is referenced by bo's relocation tree. */
3003 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3005 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3007 if (bo == NULL || target_bo == NULL)
3009 if (target_bo_gem->used_as_reloc_target)
3010 return _drm_intel_gem_bo_references(bo, target_bo);
3015 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3017 unsigned int i = bufmgr_gem->num_buckets;
3019 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3021 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3022 bufmgr_gem->cache_bucket[i].size = size;
3023 bufmgr_gem->num_buckets++;
3027 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3029 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3031 /* OK, so power-of-two buckets were too wasteful of memory.
3032 * Give 3 other sizes between each power of two, to hopefully
3033 * cover things accurately enough. (The alternative is
3034 * probably to just go for exact matching of sizes, and assume
3035 * that for things like composited window resize the tiled
3036 * width/height alignment and rounding of sizes to pages will
3037 * get us useful cache hit rates anyway)
3039 add_bucket(bufmgr_gem, 4096);
3040 add_bucket(bufmgr_gem, 4096 * 2);
3041 add_bucket(bufmgr_gem, 4096 * 3);
3043 /* Initialize the linked lists for BO reuse cache. */
3044 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3045 add_bucket(bufmgr_gem, size);
3047 add_bucket(bufmgr_gem, size + size * 1 / 4);
3048 add_bucket(bufmgr_gem, size + size * 2 / 4);
3049 add_bucket(bufmgr_gem, size + size * 3 / 4);
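/*
 * For example (derived from the loop above), the buckets around 16KB come
 * out as 16384, 20480, 24576 and 28672 bytes before jumping to 32768.
 */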
3054 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3056 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3058 bufmgr_gem->vma_max = limit;
3060 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3064 * Get the PCI ID for the device. This can be overridden by setting the
3065 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3068 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3070 char *devid_override;
3073 drm_i915_getparam_t gp;
3075 if (geteuid() == getuid()) {
3076 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3077 if (devid_override) {
3078 bufmgr_gem->no_exec = true;
3079 return strtod(devid_override, NULL);
3084 gp.param = I915_PARAM_CHIPSET_ID;
3086 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3088 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3089 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3095 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3097 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3099 return bufmgr_gem->pci_device;
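/*
 * Example of the INTEL_DEVID_OVERRIDE mechanism handled in
 * get_pci_device_id() above (illustrative; 0x1912 is one Skylake GT2 PCI ID,
 * and the override is only honoured when geteuid() == getuid()):
 *
 *	INTEL_DEVID_OVERRIDE=0x1912 ./client
 *
 * This makes libdrm report that device ID and sets no_exec, so execbuffer2
 * submissions are built but skipped.
 */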
3103 * Sets the AUB filename.
3105 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3106 * for it to have any effect.
3109 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3110 const char *filename)
3115 * Sets up AUB dumping.
3117 * This is a trace file format that can be used with the simulator.
3118 * Packets are emitted in a format somewhat like GPU command packets.
3119 * You can set up a GTT and upload your objects into the referenced
3120 * space, then send off batchbuffers and get BMPs out the other end.
3123 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3125 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3126 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
3127 "then run (for example)\n\n"
3128 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3129 "See the intel_aubdump man page for more details.\n");
3133 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3135 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3136 struct drm_i915_gem_context_create create;
3137 drm_intel_context *context = NULL;
3140 context = calloc(1, sizeof(*context));
3145 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3147 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3153 context->ctx_id = create.ctx_id;
3154 context->bufmgr = bufmgr;
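/*
 * Illustrative hardware-context sketch (batch_bo and used_bytes are assumed
 * caller-side values); contexts preserve GPU state between batches:
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */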
3160 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3162 drm_intel_bufmgr_gem *bufmgr_gem;
3163 struct drm_i915_gem_context_destroy destroy;
3171 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3172 destroy.ctx_id = ctx->ctx_id;
3173 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3176 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
3183 drm_intel_get_reset_stats(drm_intel_context *ctx,
3184 uint32_t *reset_count,
3188 drm_intel_bufmgr_gem *bufmgr_gem;
3189 struct drm_i915_reset_stats stats;
3197 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3198 stats.ctx_id = ctx->ctx_id;
3199 ret = drmIoctl(bufmgr_gem->fd,
3200 DRM_IOCTL_I915_GET_RESET_STATS,
3203 if (reset_count != NULL)
3204 *reset_count = stats.reset_count;
3207 *active = stats.batch_active;
3209 if (pending != NULL)
3210 *pending = stats.batch_pending;
3217 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3221 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3222 struct drm_i915_reg_read reg_read;
3226 reg_read.offset = offset;
3228 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3230 *result = reg_read.val;
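/*
 * Illustrative use of drm_intel_reg_read() (assumption: the render ring
 * TIMESTAMP register at 0x2358 is whitelisted by the running kernel):
 *
 *	uint64_t ts;
 *	if (drm_intel_reg_read(bufmgr, 0x2358, &ts) == 0)
 *		printf("GPU timestamp: %llu\n", (unsigned long long)ts);
 */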
3235 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3237 drm_i915_getparam_t gp;
3241 gp.value = (int*)subslice_total;
3242 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3243 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3251 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3253 drm_i915_getparam_t gp;
3257 gp.value = (int*)eu_total;
3258 gp.param = I915_PARAM_EU_TOTAL;
3259 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3267 drm_intel_get_pooled_eu(int fd)
3269 drm_i915_getparam_t gp;
3273 gp.param = I915_PARAM_HAS_POOLED_EU;
3275 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3282 drm_intel_get_min_eu_in_pool(int fd)
3284 drm_i915_getparam_t gp;
3288 gp.param = I915_PARAM_MIN_EU_IN_POOL;
3290 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3297 * Annotate the given bo for use in aub dumping.
3299 * \param annotations is an array of drm_intel_aub_annotation objects
3300 * describing the type of data in various sections of the bo. Each
3301 * element of the array specifies the type and subtype of a section of
3302 * the bo, and the past-the-end offset of that section. The elements
3303 * of \c annotations must be sorted so that ending_offset is increasing.
3306 * \param count is the number of elements in the \c annotations array.
3307 * If \c count is zero, then \c annotations will not be dereferenced.
3309 * Annotations are copied into a private data structure, so the caller may
3310 * re-use the memory pointed to by \c annotations after the call returns.
3313 * Annotations are stored for the lifetime of the bo; to reset to the
3314 * default state (no annotations), call this function with a \c count of zero.
3318 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3319 drm_intel_aub_annotation *annotations,
3324 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3325 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3327 static drm_intel_bufmgr_gem *
3328 drm_intel_bufmgr_gem_find(int fd)
3330 drm_intel_bufmgr_gem *bufmgr_gem;
3332 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3333 if (bufmgr_gem->fd == fd) {
3334 atomic_inc(&bufmgr_gem->refcount);
3343 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3345 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3347 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3348 pthread_mutex_lock(&bufmgr_list_mutex);
3350 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3351 DRMLISTDEL(&bufmgr_gem->managers);
3352 drm_intel_bufmgr_gem_destroy(bufmgr);
3355 pthread_mutex_unlock(&bufmgr_list_mutex);
3359 void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3361 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3362 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3364 if (bo_gem->gtt_virtual)
3365 return bo_gem->gtt_virtual;
3367 if (bo_gem->is_userptr)
3370 pthread_mutex_lock(&bufmgr_gem->lock);
3371 if (bo_gem->gtt_virtual == NULL) {
3372 struct drm_i915_gem_mmap_gtt mmap_arg;
3375 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3376 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3378 if (bo_gem->map_count++ == 0)
3379 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3382 mmap_arg.handle = bo_gem->gem_handle;
3384 /* Get the fake offset back... */
3386 if (drmIoctl(bufmgr_gem->fd,
3387 DRM_IOCTL_I915_GEM_MMAP_GTT,
3390 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3391 MAP_SHARED, bufmgr_gem->fd,
3394 if (ptr == MAP_FAILED) {
3395 if (--bo_gem->map_count == 0)
3396 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3400 bo_gem->gtt_virtual = ptr;
3402 pthread_mutex_unlock(&bufmgr_gem->lock);
3404 return bo_gem->gtt_virtual;
3407 void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3409 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3410 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3412 if (bo_gem->mem_virtual)
3413 return bo_gem->mem_virtual;
3415 if (bo_gem->is_userptr) {
3416 /* Return the same user ptr */
3417 return bo_gem->user_virtual;
3420 pthread_mutex_lock(&bufmgr_gem->lock);
3421 if (!bo_gem->mem_virtual) {
3422 struct drm_i915_gem_mmap mmap_arg;
3424 if (bo_gem->map_count++ == 0)
3425 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3427 DBG("bo_map: %d (%s), map_count=%d\n",
3428 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3431 mmap_arg.handle = bo_gem->gem_handle;
3432 mmap_arg.size = bo->size;
3433 if (drmIoctl(bufmgr_gem->fd,
3434 DRM_IOCTL_I915_GEM_MMAP,
3436 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3437 __FILE__, __LINE__, bo_gem->gem_handle,
3438 bo_gem->name, strerror(errno));
3439 if (--bo_gem->map_count == 0)
3440 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3442 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3443 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3446 pthread_mutex_unlock(&bufmgr_gem->lock);
3448 return bo_gem->mem_virtual;
3451 void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3453 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3454 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3456 if (bo_gem->wc_virtual)
3457 return bo_gem->wc_virtual;
3459 if (bo_gem->is_userptr)
3462 pthread_mutex_lock(&bufmgr_gem->lock);
3463 if (!bo_gem->wc_virtual) {
3464 struct drm_i915_gem_mmap mmap_arg;
3466 if (bo_gem->map_count++ == 0)
3467 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3469 DBG("bo_map: %d (%s), map_count=%d\n",
3470 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3473 mmap_arg.handle = bo_gem->gem_handle;
3474 mmap_arg.size = bo->size;
3475 mmap_arg.flags = I915_MMAP_WC;
3476 if (drmIoctl(bufmgr_gem->fd,
3477 DRM_IOCTL_I915_GEM_MMAP,
3479 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3480 __FILE__, __LINE__, bo_gem->gem_handle,
3481 bo_gem->name, strerror(errno));
3482 if (--bo_gem->map_count == 0)
3483 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3485 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3486 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3489 pthread_mutex_unlock(&bufmgr_gem->lock);
3491 return bo_gem->wc_virtual;
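/*
 * These *_map__{gtt,cpu,wc} helpers return a cached mapping and perform no
 * domain synchronization themselves; callers are expected to handle
 * coherency (e.g. with drm_intel_gem_bo_start_gtt_access() or explicit
 * waits). A common illustrative fallback when WC mappings are unavailable:
 *
 *	void *ptr = drm_intel_gem_bo_map__wc(bo);
 *	if (ptr == NULL)
 *		ptr = drm_intel_gem_bo_map__gtt(bo);
 */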
3495 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3496 * and manage buffer objects.
3498 * \param fd File descriptor of the opened DRM device.
3501 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3503 drm_intel_bufmgr_gem *bufmgr_gem;
3504 struct drm_i915_gem_get_aperture aperture;
3505 drm_i915_getparam_t gp;
3509 pthread_mutex_lock(&bufmgr_list_mutex);
3511 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3515 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3516 if (bufmgr_gem == NULL)
3519 bufmgr_gem->fd = fd;
3520 atomic_set(&bufmgr_gem->refcount, 1);
3522 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3529 ret = drmIoctl(bufmgr_gem->fd,
3530 DRM_IOCTL_I915_GEM_GET_APERTURE,
3534 bufmgr_gem->gtt_size = aperture.aper_available_size;
3536 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3538 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3539 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3540 "May lead to reduced performance or incorrect "
3542 (int)bufmgr_gem->gtt_size / 1024);
3545 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3547 if (IS_GEN2(bufmgr_gem->pci_device))
3548 bufmgr_gem->gen = 2;
3549 else if (IS_GEN3(bufmgr_gem->pci_device))
3550 bufmgr_gem->gen = 3;
3551 else if (IS_GEN4(bufmgr_gem->pci_device))
3552 bufmgr_gem->gen = 4;
3553 else if (IS_GEN5(bufmgr_gem->pci_device))
3554 bufmgr_gem->gen = 5;
3555 else if (IS_GEN6(bufmgr_gem->pci_device))
3556 bufmgr_gem->gen = 6;
3557 else if (IS_GEN7(bufmgr_gem->pci_device))
3558 bufmgr_gem->gen = 7;
3559 else if (IS_GEN8(bufmgr_gem->pci_device))
3560 bufmgr_gem->gen = 8;
3561 else if (IS_GEN9(bufmgr_gem->pci_device))
3562 bufmgr_gem->gen = 9;
3569 if (IS_GEN3(bufmgr_gem->pci_device) &&
3570 bufmgr_gem->gtt_size > 256*1024*1024) {
3571 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3572 * be used for tiled blits. To simplify the accounting, just
3573 * subtract the unmappable part (fixed to 256MB on all known
3574 * gen3 devices) if the kernel advertises it. */
3575 bufmgr_gem->gtt_size -= 256*1024*1024;
3581 gp.param = I915_PARAM_HAS_EXECBUF2;
3582 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3586 gp.param = I915_PARAM_HAS_BSD;
3587 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3588 bufmgr_gem->has_bsd = ret == 0;
3590 gp.param = I915_PARAM_HAS_BLT;
3591 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3592 bufmgr_gem->has_blt = ret == 0;
3594 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3595 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3596 bufmgr_gem->has_relaxed_fencing = ret == 0;
3598 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3600 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3601 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3602 bufmgr_gem->has_wait_timeout = ret == 0;
3604 gp.param = I915_PARAM_HAS_LLC;
3605 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3607 /* Kernel does not support the HAS_LLC query; fall back to GPU
3608 * generation detection and assume that we have LLC on GEN6/GEN7
3610 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3611 IS_GEN7(bufmgr_gem->pci_device));
3613 bufmgr_gem->has_llc = *gp.value;
3615 gp.param = I915_PARAM_HAS_VEBOX;
3616 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3617 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3619 gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3620 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3621 if (ret == 0 && *gp.value > 0)
3622 bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3624 if (bufmgr_gem->gen < 4) {
3625 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3626 gp.value = &bufmgr_gem->available_fences;
3627 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3629 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3631 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3633 bufmgr_gem->available_fences = 0;
3635 /* XXX The kernel reports the total number of fences,
3636 * including any that may be pinned.
3638 * We presume that there will be at least one pinned
3639 * fence for the scanout buffer, but there may be more
3640 * than one scanout and the user may be manually
3641 * pinning buffers. Let's move to execbuffer2 and
3642 * thereby forget the insanity of using fences...
3644 bufmgr_gem->available_fences -= 2;
3645 if (bufmgr_gem->available_fences < 0)
3646 bufmgr_gem->available_fences = 0;
3650 if (bufmgr_gem->gen >= 8) {
3651 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3652 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3653 if (ret == 0 && *gp.value == 3)
3654 bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3657 /* Let's go with one relocation for every 2 dwords (but round down a bit
3658 * since a power of two would mean an extra page allocation for the reloc buffer).
3661 * Every 4 was too few for the blender benchmark.
3663 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3665 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3666 bufmgr_gem->bufmgr.bo_alloc_for_render =
3667 drm_intel_gem_bo_alloc_for_render;
3668 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3669 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3670 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3671 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3672 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3673 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3674 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3675 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3676 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3677 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3678 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3679 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3680 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3681 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3682 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3683 /* Use the new one if available */
3685 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3686 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3688 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3689 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3690 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3691 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3692 bufmgr_gem->bufmgr.debug = 0;
3693 bufmgr_gem->bufmgr.check_aperture_space =
3694 drm_intel_gem_check_aperture_space;
3695 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3696 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3697 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3698 drm_intel_gem_get_pipe_from_crtc_id;
3699 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3701 init_cache_buckets(bufmgr_gem);
3703 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3704 bufmgr_gem->vma_max = -1; /* unlimited by default */
3706 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3709 pthread_mutex_unlock(&bufmgr_list_mutex);
3711 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;