/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <xf86atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#define memclear(s) memset(&s, 0, sizeof(s))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32 bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
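
/* Illustrative sketch (not part of the original file): how the two helper
 * macros above split a 64-bit GTT offset into printable halves, the same
 * pattern the DBG() validation-list dumps later in this file rely on.
 */
static inline void
example_print_offset64(uint64_t offset64)
{
	/* e.g. offset64 = 0x0000000123456789 prints "0x00000001 23456789" */
	fprintf(stderr, "0x%08x %08x\n",
		upper_32_bits(offset64), lower_32_bits(offset64));
}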
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];

	drmMMListHead managers;

	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	int available_fences;

	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;
struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs that are referenced by this buffer */
	int softpin_target_count;
	/** Maximum amount of softpinned BOs that are referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;

	drmMMListHead vma_list;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Boolean of whether this buffer can be placed in the full 48-bit
	 * address range on gen8+.
	 *
	 * By default, buffers will be kept in a 32-bit range, unless this
	 * flag is explicitly set.
	 */
	bool use_48b_address_range;

	/**
	 * Whether this buffer is softpinned at the offset specified by the user
	 */
	bool is_softpin;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}
/**
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}
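
/* Illustrative sketch (not part of the original file): how a caller would
 * combine the two rounding helpers above when sizing an X-tiled surface.
 * The concrete numbers in the comments assume a gen3 part without relaxed
 * fencing.
 */
static unsigned long
example_tiled_allocation_size(drm_intel_bufmgr_gem *bufmgr_gem,
			      unsigned long pitch, unsigned long height)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long size;

	/* e.g. pitch 1000 -> 1024 on gen3 (next power-of-two tile multiple) */
	pitch = drm_intel_gem_bo_tile_pitch(bufmgr_gem, pitch, &tiling);
	size = pitch * height;
	/* e.g. 1024*600 = 614400 -> 1048576 (at least 1M, power-of-two) */
	return drm_intel_gem_bo_tile_size(bufmgr_gem, size, &tiling);
}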
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->is_softpin ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->is_softpin ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->is_softpin ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}
static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = bo->align;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	int flags = 0;

	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;
	if (bo_gem->use_48b_address_range)
		flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
	if (bo_gem->is_softpin)
		flags |= EXEC_OBJECT_PINNED;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
		    realloc(bufmgr_gem->exec2_objects,
			    sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
		bo->offset64 : 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_count++;
}
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0)
		bo_gem->idle = !busy.busy;

	return (ret == 0 && busy.busy);
}
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}
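
/* Illustrative sketch (not part of the original file): the DONTNEED/
 * WILLNEED protocol the wrapper above exposes. A cache marks an idle
 * buffer purgeable, and on reuse must check whether the kernel discarded
 * its pages in the meantime.
 */
static int
example_reuse_purgeable(drm_intel_bo *bo)
{
	/* Buffer is idle; let the kernel reclaim its pages under pressure. */
	drm_intel_gem_bo_madvise(bo, I915_MADV_DONTNEED);

	/* ... later, before touching the buffer again ... */
	if (!drm_intel_gem_bo_madvise(bo, I915_MADV_WILLNEED)) {
		/* Pages were purged; contents are gone, reallocate instead. */
		return -ENOMEM;
	}
	return 0;
}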
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
		   list (vma_list), so better set the list head here */
		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;
	bo_gem->use_48b_address_range = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}
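
/* Illustrative usage sketch (not part of the original file): the typical
 * allocate/map/fill/unmap lifecycle against a bufmgr created with
 * drm_intel_bufmgr_gem_init(). Error handling is abbreviated.
 */
static int
example_alloc_and_fill(int fd)
{
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo *bo;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);	/* 4096-byte batches */
	if (bufmgr == NULL)
		return -1;

	bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
	if (bo == NULL)
		return -1;

	/* Maps through drm_intel_gem_bo_map() above: CPU domain, coherent. */
	drm_intel_bo_map(bo, 1 /* write_enable */);
	memset(bo->virtual, 0, 4096);
	drm_intel_bo_unmap(bo);

	drm_intel_bo_unreference(bo);	/* may go back to the bucket cache */
	return 0;
}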
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * extra pages past the last row.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault. This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}
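
/* Illustrative usage sketch (not part of the original file): requesting an
 * X-tiled surface. The helper may downgrade *tiling_mode (e.g. to
 * I915_TILING_NONE when the pitch exceeds pre-965 limits), so the caller
 * must re-check it and use the returned pitch rather than width * cpp.
 */
static drm_intel_bo *
example_alloc_scanout(drm_intel_bufmgr *bufmgr, int width, int height)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch = 0;
	drm_intel_bo *bo;

	bo = drm_intel_bo_alloc_tiled(bufmgr, "scanout", width, height,
				      4 /* cpp, XRGB8888 */, &tiling,
				      &pitch, BO_ALLOC_FOR_RENDER);
	if (bo != NULL && tiling != I915_TILING_X) {
		/* Hardware limits forced a different layout; handle or bail. */
	}
	return bo;
}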
static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
			       const char *name,
			       void *addr,
			       uint32_t tiling_mode,
			       uint32_t stride,
			       unsigned long size,
			       unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for the time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_USERPTR,
		       &userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->is_userptr = true;
	bo_gem->bo.virtual = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

	DRMINITLISTHEAD(&bo_gem->name_list);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;
	bo_gem->use_48b_address_range = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
	    addr, bo_gem->gem_handle, bo_gem->name,
	    size, stride, tiling_mode);

	return &bo_gem->bo;
}
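
/* Illustrative usage sketch (not part of the original file): wrapping an
 * existing page-aligned CPU allocation as a GEM object via the userptr
 * path. Both the pointer and the size must be page aligned.
 */
static drm_intel_bo *
example_wrap_malloc(drm_intel_bufmgr *bufmgr, size_t sz)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	void *ptr = NULL;

	sz = (sz + pgsz - 1) & ~((size_t)pgsz - 1);	/* round up to pages */
	if (posix_memalign(&ptr, pgsz, sz) != 0)
		return NULL;

	/* Dispatches to drm_intel_gem_bo_alloc_userptr() once support has
	 * been detected (see check_bo_alloc_userptr() below).
	 */
	return drm_intel_bo_alloc_userptr(bufmgr, "wrapped", ptr,
					  I915_TILING_NONE, 0, sz, 0);
}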
static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
		    pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here as we want to keep the
	 * kernel mm tracking alive for our lifetime. The first time we
	 * create a userptr object the kernel has to install a mmu_notifier
	 * which is a heavyweight operation (e.g. it requires taking all
	 * mm_locks and stop_machine()).
	 */
	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}
static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * as a reference to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return &bo_gem->bo;
		}
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == open_arg.handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem) {
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;
	bo_gem->use_48b_address_range = false;

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}
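
/* Illustrative usage sketch (not part of the original file): sharing a
 * buffer between two processes with flink. The exporter publishes a
 * global name; the importer wraps it with the function above.
 */
static drm_intel_bo *
example_import_flink(drm_intel_bufmgr *bufmgr, drm_intel_bo *exported)
{
	uint32_t name = 0;

	/* Exporter side: drm_intel_bo_flink() asks the kernel for a global
	 * name (in a real setup the name travels over IPC/X11, not a call).
	 */
	if (drm_intel_bo_flink(exported, &name) != 0)
		return NULL;

	/* Importer side: resolves the name, reusing a cached bo if this
	 * process has seen the object before (see the list walk above).
	 */
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}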
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}
static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
							  time);
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);

	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);

		if (atomic_dec_and_test(&bo_gem->refcount)) {
			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		}

		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		bo->virtual = bo_gem->user_virtual;
		return 0;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
static int
map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}

		bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
					       MAP_SHARED, bufmgr_gem->fd,
					       mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	return 0;
}
int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret != 0) {
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	/* Now move it to the GTT domain so that the GPU and CPU
	 * caches are flushed and the GPU isn't actively using the
	 * buffer.
	 *
	 * The pagefault handler does this domain change for us when
	 * it has unbound the BO from the GTT, but it's up to us to
	 * tell it when we're about to use things if we had done
	 * rendering and it still happens to be bound to the GTT.
	 */
	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
	int ret;

	/* If the CPU cache isn't coherent with the GTT, then use a
	 * regular synchronized mapping. The problem is that we don't
	 * track where the buffer was last used on the CPU side in
	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
	 * we would potentially corrupt the buffer even when the user
	 * does reasonable things.
	 */
	if (!bufmgr_gem->has_llc)
		return drm_intel_gem_bo_map_gtt(bo);

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret == 0) {
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
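
/* Illustrative usage sketch (not part of the original file): the
 * GL_ARB_map_buffer_range-style append pattern described above. The
 * caller promises not to touch bytes the GPU may still be reading.
 */
static int
example_append_vertices(drm_intel_bo *vbo, const void *data,
			unsigned long used, unsigned long len)
{
	int ret;

	/* No wait: earlier bytes may still be in flight on the GPU. */
	ret = drm_intel_gem_bo_map_unsynchronized(vbo);
	if (ret != 0)
		return ret;

	/* Write only into the not-yet-submitted tail of the buffer. */
	memcpy((char *)vbo->virtual + used, data, len);
	return drm_intel_bo_unmap(vbo);
}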
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	if (bo_gem->is_userptr)
		return 0;

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		struct drm_i915_gem_sw_finish sw_finish;

		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */
		memclear(sw_finish);
		sw_finish.handle = bo_gem->gem_handle;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
		ret = ret == -1 ? -errno : 0;

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo, as that would exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pwrite);
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	memclear(get_pipe_from_crtc_id);
	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pread);
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_wait wait;
	int ret;

	if (!bufmgr_gem->has_wait_timeout) {
		DBG("%s:%d: Timed wait is not supported. Falling back to "
		    "infinite wait\n", __FILE__, __LINE__);
		if (timeout_ns) {
			drm_intel_gem_bo_wait_rendering(bo);
			return 0;
		} else {
			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
		}
	}

	memclear(wait);
	wait.bo_handle = bo_gem->gem_handle;
	wait.timeout_ns = timeout_ns;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	if (ret == -1)
		return -errno;

	return ret;
}
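
/* Illustrative usage sketch (not part of the original file): a bounded
 * wait using the documented -ETIME contract, falling back to an infinite
 * wait if the buffer is still busy after the deadline.
 */
static int
example_wait_one_ms(drm_intel_bo *bo)
{
	int ret = drm_intel_gem_bo_wait(bo, 1000000);	/* 1 ms in ns */

	if (ret == -ETIME) {
		/* Still busy after 1 ms; block until rendering finishes. */
		ret = drm_intel_gem_bo_wait(bo, -1);
	}
	return ret;
}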
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}
static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_gem_close close_bo;
	int i, ret;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	/* Release userptr bo kept hanging around for optimisation. */
	if (bufmgr_gem->userptr_active.ptr) {
		memclear(close_bo);
		close_bo.handle = bufmgr_gem->userptr_active.handle;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
		free(bufmgr_gem->userptr_active.ptr);
		if (ret)
			fprintf(stderr,
				"Failed to release test userptr object! (%d) "
				"i915 kernel driver may not be sane!\n", errno);
	}

	free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 bool need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	bool fenced_command;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = false;

	fenced_command = need_fence;
	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = false;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence) {
		assert(target_bo_gem->reloc_count == 0);
		target_bo_gem->reloc_tree_fences = 1;
	}

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
	}

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
	bo_gem->reloc_count++;

	return 0;
}
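
/* Illustrative usage sketch (not part of the original file): how a batch
 * builder emits a pointer into a target buffer. The presumed value is
 * written into the batch now; the kernel rewrites it only if target_bo
 * actually moves (see the doc comment above). batch_map is assumed to be
 * the CPU mapping of the batch buffer.
 */
static int
example_emit_pointer(drm_intel_bo *batch, uint32_t *batch_map,
		     uint32_t batch_offset, drm_intel_bo *target_bo)
{
	/* Precomputed value the kernel may leave untouched. */
	batch_map[batch_offset / 4] = (uint32_t)target_bo->offset64;

	/* Record the fixup: dword at batch_offset points to target_bo+0. */
	return drm_intel_bo_emit_reloc(batch, batch_offset,
				       target_bo, 0,
				       I915_GEM_DOMAIN_RENDER,
				       I915_GEM_DOMAIN_RENDER);
}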
void
drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->use_48b_address_range = enable;
}
static int
drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	if (!target_bo_gem->is_softpin)
		return -EINVAL;
	if (target_bo_gem == bo_gem)
		return -EINVAL;

	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
		int new_size = bo_gem->softpin_target_size * 2;
		if (new_size == 0)
			new_size = bufmgr_gem->max_relocs;

		bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
				sizeof(drm_intel_bo *));
		if (!bo_gem->softpin_target)
			return -ENOMEM;

		bo_gem->softpin_target_size = new_size;
	}
	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);
	bo_gem->softpin_target_count++;

	return 0;
}
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

	if (target_bo_gem->is_softpin)
		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
	else
		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
					read_domains, write_domain,
					!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, true);
}

int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reloc_count;
}
/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup. Instead, save the state of the
 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
int
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);

	/* Unreference the cleared target buffers */
	pthread_mutex_lock(&bufmgr_gem->lock);

	for (i = start; i < bo_gem->reloc_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
		if (&target_bo_gem->bo != bo) {
			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
	}
	bo_gem->softpin_target_count = 0;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
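
/* Illustrative usage sketch (not part of the original file): the
 * save/emit/check/rollback flow described in the comment above.
 * emit_state_relocs() is a hypothetical callback standing in for real
 * state emission.
 */
static int
example_emit_state_checked(drm_intel_bo *batch,
			   int (*emit_state_relocs)(drm_intel_bo *))
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch);
	int ret = emit_state_relocs(batch);

	if (ret == 0 &&
	    drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
		/* Doesn't fit: drop everything emitted after the snapshot. */
		drm_intel_gem_bo_clear_relocs(batch, saved);
		ret = -ENOSPC;
	}
	return ret;
}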
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}
static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
		int need_fence;

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_intel_bo *target_bo = bo_gem->softpin_target[i];

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		drm_intel_gem_bo_process_reloc2(target_bo);
		drm_intel_add_validate_buffer2(target_bo, false);
	}
}
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}
static void
drm_intel_update_buffer_offsets2(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int i;

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

                /* Update the buffer offset */
                if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
                        /* If we see a softpinned object here, the kernel
                         * relocated our object, which indicates a
                         * programming error.
                         */
                        assert(!bo_gem->is_softpin);
                        DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
                            bo_gem->gem_handle, bo_gem->name,
                            upper_32_bits(bo->offset64),
                            lower_32_bits(bo->offset64),
                            upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
                            lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
                        bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
                        bo->offset = bufmgr_gem->exec2_objects[i].offset;
                }
        }
}
void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
                              int x1, int y1, int width, int height,
                              enum aub_dump_bmp_format format,
                              int pitch, int offset)
{
}
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
                      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        struct drm_i915_gem_execbuffer execbuf;
        int ret, i;

        if (to_bo_gem(bo)->has_error)
                return -ENOMEM;

        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Update indices and set up the validate list. */
        drm_intel_gem_bo_process_reloc(bo);

        /* Add the batch buffer to the validation list.  There are no
         * relocations pointing to it.
         */
        drm_intel_add_validate_buffer(bo);

        memclear(execbuf);
        execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = used;
        execbuf.cliprects_ptr = (uintptr_t) cliprects;
        execbuf.num_cliprects = num_cliprects;
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER,
                       &execbuf);
        if (ret != 0) {
                ret = -errno;
                if (errno == ENOSPC) {
                        DBG("Execbuffer fails to pin. "
                            "Estimate: %u. Actual: %u. Available: %u\n",
                            drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
                                                               bufmgr_gem->exec_count),
                            drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
                                                              bufmgr_gem->exec_count),
                            (unsigned int)bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets(bufmgr_gem);

        if (bufmgr_gem->bufmgr.debug)
                drm_intel_gem_dump_validation_list(bufmgr_gem);

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

                bo_gem->idle = false;

                /* Disconnect the buffer from the validate list */
                bo_gem->validate_index = -1;
                bufmgr_gem->exec_bos[i] = NULL;
        }
        bufmgr_gem->exec_count = 0;
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return ret;
}
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
         drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
         unsigned int flags)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
        struct drm_i915_gem_execbuffer2 execbuf;
        int ret = 0;
        int i;

        if (to_bo_gem(bo)->has_error)
                return -ENOMEM;

        switch (flags & 0x7) {
        default:
                return -EINVAL;
        case I915_EXEC_BLT:
                if (!bufmgr_gem->has_blt)
                        return -EINVAL;
                break;
        case I915_EXEC_BSD:
                if (!bufmgr_gem->has_bsd)
                        return -EINVAL;
                break;
        case I915_EXEC_VEBOX:
                if (!bufmgr_gem->has_vebox)
                        return -EINVAL;
                break;
        case I915_EXEC_RENDER:
        case I915_EXEC_DEFAULT:
                break;
        }

        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Update indices and set up the validate list. */
        drm_intel_gem_bo_process_reloc2(bo);

        /* Add the batch buffer to the validation list.  There are no relocations
         * pointing to it.
         */
        drm_intel_add_validate_buffer2(bo, 0);

        memclear(execbuf);
        execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = used;
        execbuf.cliprects_ptr = (uintptr_t)cliprects;
        execbuf.num_cliprects = num_cliprects;
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;
        execbuf.flags = flags;
        if (ctx == NULL)
                i915_execbuffer2_set_context_id(execbuf, 0);
        else
                i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
        execbuf.rsvd2 = 0;

        if (bufmgr_gem->no_exec)
                goto skip_execution;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER2,
                       &execbuf);
        if (ret != 0) {
                ret = -errno;
                if (ret == -ENOSPC) {
                        DBG("Execbuffer fails to pin. "
                            "Estimate: %u. Actual: %u. Available: %u\n",
                            drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
                                                               bufmgr_gem->exec_count),
                            drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
                                                              bufmgr_gem->exec_count),
                            (unsigned int) bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets2(bufmgr_gem);

skip_execution:
        if (bufmgr_gem->bufmgr.debug)
                drm_intel_gem_dump_validation_list(bufmgr_gem);

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

                bo_gem->idle = false;

                /* Disconnect the buffer from the validate list */
                bo_gem->validate_index = -1;
                bufmgr_gem->exec_bos[i] = NULL;
        }
        bufmgr_gem->exec_count = 0;
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return ret;
}
static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
                       drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
        return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
                        I915_EXEC_RENDER);
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
                           drm_clip_rect_t *cliprects, int num_cliprects,
                           int DR4, unsigned int flags)
{
        return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, flags);
}

int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
                              int used, unsigned int flags)
{
        return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
}
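/*
 * Illustrative sketch (not part of the original source): submitting a batch
 * to the blit ring through the public multi-ring entry point; `batch` and
 * `used_bytes` are hypothetical.
 *
 *      drm_intel_bo_mrb_exec(batch, used_bytes, NULL, 0, 0, I915_EXEC_BLT);
 */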
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_pin pin;
        int ret;

        memclear(pin);
        pin.handle = bo_gem->gem_handle;
        pin.alignment = alignment;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_PIN,
                       &pin);
        if (ret != 0)
                return -errno;

        bo->offset64 = pin.offset;
        bo->offset = pin.offset;
        return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_unpin unpin;
        int ret;

        memclear(unpin);
        unpin.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
        if (ret != 0)
                return -errno;

        return 0;
}
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_tiling set_tiling;
        int ret;

        if (bo_gem->global_name == 0 &&
            tiling_mode == bo_gem->tiling_mode &&
            stride == bo_gem->stride)
                return 0;

        memset(&set_tiling, 0, sizeof(set_tiling));
        do {
                /* set_tiling is slightly broken and overwrites the
                 * input on the error path, so we have to open code
                 * drmIoctl.
                 */
                set_tiling.handle = bo_gem->gem_handle;
                set_tiling.tiling_mode = tiling_mode;
                set_tiling.stride = stride;

                ret = ioctl(bufmgr_gem->fd,
                            DRM_IOCTL_I915_GEM_SET_TILING,
                            &set_tiling);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
        if (ret == -1)
                return -errno;

        bo_gem->tiling_mode = set_tiling.tiling_mode;
        bo_gem->swizzle_mode = set_tiling.swizzle_mode;
        bo_gem->stride = set_tiling.stride;
        return 0;
}
static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t stride)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret;

        /* Tiling with userptr surfaces is not supported on all hardware,
         * so refuse it for the time being.
         */
        if (bo_gem->is_userptr)
                return -EINVAL;

        /* Linear buffers have no stride.  By ensuring that we only ever use
         * stride 0 with linear buffers, we simplify our code.
         */
        if (*tiling_mode == I915_TILING_NONE)
                stride = 0;

        ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
        if (ret == 0)
                drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

        *tiling_mode = bo_gem->tiling_mode;
        return ret;
}
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t * swizzle_mode)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        *tiling_mode = bo_gem->tiling_mode;
        *swizzle_mode = bo_gem->swizzle_mode;
        return 0;
}
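/*
 * Illustrative sketch (not part of the original source): requesting X tiling
 * and reading back what the kernel actually granted; `bo`, `pitch` and
 * `fallback_to_linear()` are hypothetical.
 *
 *      uint32_t tiling = I915_TILING_X, swizzle;
 *
 *      drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *      drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 *      if (tiling != I915_TILING_X)
 *              fallback_to_linear(bo);
 */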
static int
drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        bo_gem->is_softpin = true;
        bo->offset64 = offset;
        bo->offset = offset;
        return 0;
}
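/*
 * Illustrative sketch (not part of the original source): pinning a BO at a
 * fixed GPU virtual address so batches can embed the address directly
 * instead of emitting relocations; `bo` and the chosen offset are
 * hypothetical, and the call only succeeds on kernels advertising
 * I915_PARAM_HAS_EXEC_SOFTPIN.
 *
 *      if (drm_intel_bo_set_softpin_offset(bo, 0x10000000ull) == 0) {
 *              // batches may now reference 0x10000000 without relocs
 *      }
 */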
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        int ret;
        uint32_t handle;
        drm_intel_bo_gem *bo_gem;
        struct drm_i915_gem_get_tiling get_tiling;
        drmMMListHead *list;

        pthread_mutex_lock(&bufmgr_gem->lock);
        ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
        if (ret) {
                DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return NULL;
        }

        /*
         * See if the kernel has already returned this buffer to us. Just as
         * for named buffers, we must not create two bo's pointing at the same
         * kernel object
         */
        for (list = bufmgr_gem->named.next;
             list != &bufmgr_gem->named;
             list = list->next) {
                bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
                if (bo_gem->gem_handle == handle) {
                        drm_intel_gem_bo_reference(&bo_gem->bo);
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return &bo_gem->bo;
                }
        }

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem) {
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return NULL;
        }
        /* Determine size of bo.  The fd-to-handle ioctl really should
         * return the size, but it doesn't.  If we have kernel 3.12 or
         * later, we can lseek on the prime fd to get the size.  Older
         * kernels will just fail, in which case we fall back to the
         * provided (estimated or guessed) size. */
        ret = lseek(prime_fd, 0, SEEK_END);
        if (ret != -1)
                bo_gem->bo.size = ret;
        else
                bo_gem->bo.size = size;

        bo_gem->bo.handle = handle;
        bo_gem->bo.bufmgr = bufmgr;

        bo_gem->gem_handle = handle;

        atomic_set(&bo_gem->refcount, 1);

        bo_gem->name = "prime";
        bo_gem->validate_index = -1;
        bo_gem->reloc_tree_fences = 0;
        bo_gem->used_as_reloc_target = false;
        bo_gem->has_error = false;
        bo_gem->reusable = false;
        bo_gem->use_48b_address_range = false;

        DRMINITLISTHEAD(&bo_gem->vma_list);
        DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        pthread_mutex_unlock(&bufmgr_gem->lock);

        memclear(get_tiling);
        get_tiling.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_GET_TILING,
                       &get_tiling);
        if (ret != 0) {
                DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
                drm_intel_gem_bo_unreference(&bo_gem->bo);
                return NULL;
        }
        bo_gem->tiling_mode = get_tiling.tiling_mode;
        bo_gem->swizzle_mode = get_tiling.swizzle_mode;
        /* XXX stride is unknown */
        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

        return &bo_gem->bo;
}
int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        pthread_mutex_lock(&bufmgr_gem->lock);
        if (DRMLISTEMPTY(&bo_gem->name_list))
                DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        pthread_mutex_unlock(&bufmgr_gem->lock);

        if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
                               DRM_CLOEXEC, prime_fd) != 0)
                return -errno;

        bo_gem->reusable = false;

        return 0;
}
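/*
 * Illustrative sketch (not part of the original source): sharing a BO with
 * another client through PRIME; `bo`, `other_bufmgr` and the size are
 * hypothetical.
 *
 *      int fd;
 *
 *      if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *              // pass fd to the other process, which then imports it:
 *              drm_intel_bo *shared =
 *                      drm_intel_bo_gem_create_from_prime(other_bufmgr, fd, 4096);
 *      }
 */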
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret;

        if (!bo_gem->global_name) {
                struct drm_gem_flink flink;

                memclear(flink);
                flink.handle = bo_gem->gem_handle;

                pthread_mutex_lock(&bufmgr_gem->lock);

                ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
                if (ret != 0) {
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return -errno;
                }

                bo_gem->global_name = flink.name;
                bo_gem->reusable = false;

                if (DRMLISTEMPTY(&bo_gem->name_list))
                        DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
                pthread_mutex_unlock(&bufmgr_gem->lock);
        }

        *name = bo_gem->global_name;
        return 0;
}
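/*
 * Illustrative sketch (not part of the original source): exporting a global
 * (flink) name for legacy sharing such as DRI2; `bo` is hypothetical.  Note
 * that flink names, unlike PRIME fds, are guessable by other clients.
 *
 *      uint32_t name;
 *
 *      if (drm_intel_bo_flink(bo, &name) == 0) {
 *              // hand `name` to the other client
 *      }
 */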
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

        bufmgr_gem->bo_reuse = true;
}
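/*
 * Illustrative sketch (not part of the original source): drivers typically
 * turn reuse on right after creating the buffer manager; `fd` is a
 * hypothetical DRM device fd.
 *
 *      drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *
 *      if (bufmgr)
 *              drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */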
/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, a fence register will be
 * allocated for every reloc.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

        if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
                bufmgr_gem->fenced_relocs = true;
}
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;
        int total = 0;

        if (bo == NULL || bo_gem->included_in_check_aperture)
                return 0;

        total += bo->size;
        bo_gem->included_in_check_aperture = true;

        for (i = 0; i < bo_gem->reloc_count; i++)
                total +=
                    drm_intel_gem_bo_get_aperture_space(bo_gem->
                                                        reloc_target_info[i].bo);

        return total;
}
/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

                if (bo_gem == NULL)
                        continue;

                total += bo_gem->reloc_tree_fences;
        }
        return total;
}
/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        if (bo == NULL || !bo_gem->included_in_check_aperture)
                return;

        bo_gem->included_in_check_aperture = false;

        for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
                                                           reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

                if (bo_gem != NULL)
                        total += bo_gem->reloc_tree_size;
        }
        return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
                /* For the first buffer object in the array, we get an
                 * accurate count back for its reloc_tree size (since nothing
                 * had been flagged as being counted yet).  We can save that
                 * value out as a more conservative reloc_tree_size that
                 * avoids double-counting target buffers.  Since the first
                 * buffer happens to usually be the batch buffer in our
                 * callers, this can pull us back from doing the tree
                 * walk on every new batch emit.
                 */
                if (i == 0) {
                        drm_intel_bo_gem *bo_gem =
                            (drm_intel_bo_gem *) bo_array[i];
                        bo_gem->reloc_tree_size = total;
                }
        }

        for (i = 0; i < count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
        return total;
}
/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, making rendering
 * effectively synchronous.  By emitting smaller batchbuffers, we eat some
 * CPU overhead to get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
        drm_intel_bufmgr_gem *bufmgr_gem =
            (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
        unsigned int total = 0;
        unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
        int total_fences;

        /* Check for fence reg constraints if necessary */
        if (bufmgr_gem->available_fences) {
                total_fences = drm_intel_gem_total_fences(bo_array, count);
                if (total_fences > bufmgr_gem->available_fences)
                        return -ENOSPC;
        }

        total = drm_intel_gem_estimate_batch_space(bo_array, count);

        if (total > threshold)
                total = drm_intel_gem_compute_batch_space(bo_array, count);

        if (total > threshold) {
                DBG("check_space: overflowed available aperture, "
                    "%dkb vs %dkb\n",
                    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
                return -ENOSPC;
        } else {
                DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
                    (int)bufmgr_gem->gtt_size / 1024);
                return 0;
        }
}
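/*
 * Illustrative sketch (not part of the original source): flushing early
 * when the set of referenced buffers may no longer fit in the aperture;
 * `batch_bo` and `flush_batch()` are hypothetical.
 *
 *      if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *              flush_batch();  // submit now and start a fresh batch
 */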
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        bo_gem->reusable = false;
        return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        return bo_gem->reusable;
}
static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int i;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo == target_bo)
                        return 1;
                if (bo == bo_gem->reloc_target_info[i].bo)
                        continue;
                if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
                                                 target_bo))
                        return 1;
        }

        for (i = 0; i < bo_gem->softpin_target_count; i++) {
                if (bo_gem->softpin_target[i] == target_bo)
                        return 1;
                if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
                        return 1;
        }

        return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
        drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

        if (bo == NULL || target_bo == NULL)
                return 0;
        if (target_bo_gem->used_as_reloc_target)
                return _drm_intel_gem_bo_references(bo, target_bo);
        return 0;
}
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
        unsigned int i = bufmgr_gem->num_buckets;

        assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

        DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
        bufmgr_gem->cache_bucket[i].size = size;
        bufmgr_gem->num_buckets++;
}
static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
        unsigned long size, cache_max_size = 64 * 1024 * 1024;

        /* OK, so power of two buckets was too wasteful of memory.
         * Give 3 other sizes between each power of two, to hopefully
         * cover things accurately enough.  (The alternative is
         * probably to just go for exact matching of sizes, and assume
         * that for things like composited window resize the tiled
         * width/height alignment and rounding of sizes to pages will
         * get us useful cache hit rates anyway)
         */
        add_bucket(bufmgr_gem, 4096);
        add_bucket(bufmgr_gem, 4096 * 2);
        add_bucket(bufmgr_gem, 4096 * 3);

        /* Initialize the linked lists for BO reuse cache. */
        for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
                add_bucket(bufmgr_gem, size);

                add_bucket(bufmgr_gem, size + size * 1 / 4);
                add_bucket(bufmgr_gem, size + size * 2 / 4);
                add_bucket(bufmgr_gem, size + size * 3 / 4);
        }
}
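/*
 * For reference (derived from the loop above, not in the original source),
 * the resulting bucket sizes begin: 4KB, 8KB, 12KB, 16KB, 20KB, 24KB, 28KB,
 * 32KB, 40KB, 48KB, 56KB, 64KB, ... up to 64MB; i.e. each power of two plus
 * three evenly spaced intermediate sizes.
 */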
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

        bufmgr_gem->vma_max = limit;

        drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
        char *devid_override;
        int devid = 0;
        int ret;
        drm_i915_getparam_t gp;

        if (geteuid() == getuid()) {
                devid_override = getenv("INTEL_DEVID_OVERRIDE");
                if (devid_override) {
                        bufmgr_gem->no_exec = true;
                        return strtod(devid_override, NULL);
                }
        }

        memclear(gp);
        gp.param = I915_PARAM_CHIPSET_ID;
        gp.value = &devid;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret) {
                fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
                fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
        }
        return devid;
}

int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

        return bufmgr_gem->pci_device;
}
/**
 * Sets the AUB filename.
 *
 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
 * for it to have any effect.
 */
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
                                      const char *filename)
{
}

/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
        fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
                "Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
                "then run (for example)\n\n"
                "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
                "See the intel_aubdump man page for more details.\n");
}
drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
        struct drm_i915_gem_context_create create;
        drm_intel_context *context = NULL;
        int ret;

        context = calloc(1, sizeof(*context));
        if (!context)
                return NULL;

        memclear(create);
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
        if (ret != 0) {
                DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
                    strerror(errno));
                free(context);
                return NULL;
        }

        context->ctx_id = create.ctx_id;
        context->bufmgr = bufmgr;

        return context;
}
void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
        drm_intel_bufmgr_gem *bufmgr_gem;
        struct drm_i915_gem_context_destroy destroy;
        int ret;

        if (ctx == NULL)
                return;

        memclear(destroy);

        bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
        destroy.ctx_id = ctx->ctx_id;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
                       &destroy);
        if (ret != 0)
                fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
                        strerror(errno));

        free(ctx);
}
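/*
 * Illustrative sketch (not part of the original source): per-context
 * submission isolates one client's GPU state from another's; `bufmgr`,
 * `batch` and `used` are hypothetical.
 *
 *      drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *
 *      if (ctx) {
 *              drm_intel_gem_bo_context_exec(batch, ctx, used, I915_EXEC_RENDER);
 *              drm_intel_gem_context_destroy(ctx);
 *      }
 */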
int
drm_intel_get_reset_stats(drm_intel_context *ctx,
                          uint32_t *reset_count,
                          uint32_t *active,
                          uint32_t *pending)
{
        drm_intel_bufmgr_gem *bufmgr_gem;
        struct drm_i915_reset_stats stats;
        int ret;

        if (ctx == NULL)
                return -EINVAL;

        memclear(stats);

        bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
        stats.ctx_id = ctx->ctx_id;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GET_RESET_STATS,
                       &stats);
        if (ret == 0) {
                if (reset_count != NULL)
                        *reset_count = stats.reset_count;

                if (active != NULL)
                        *active = stats.batch_active;

                if (pending != NULL)
                        *pending = stats.batch_pending;
        }

        return ret;
}
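/*
 * Illustrative sketch (not part of the original source): polling reset
 * statistics to learn whether this context had batches executing during a
 * GPU reset; `ctx` is hypothetical.
 *
 *      uint32_t resets, active, pending;
 *
 *      if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *          active > 0) {
 *              // this context was guilty of (or busy during) a reset
 *      }
 */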
int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
                   uint32_t offset,
                   uint64_t *result)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
        struct drm_i915_reg_read reg_read;
        int ret;

        memclear(reg_read);
        reg_read.offset = offset;

        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

        *result = reg_read.val;
        return ret;
}
int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
        drm_i915_getparam_t gp;
        int ret;

        memclear(gp);
        gp.value = (int*)subslice_total;
        gp.param = I915_PARAM_SUBSLICE_TOTAL;
        ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret)
                return -errno;

        return 0;
}

int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
        drm_i915_getparam_t gp;
        int ret;

        memclear(gp);
        gp.value = (int*)eu_total;
        gp.param = I915_PARAM_EU_TOTAL;
        ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret)
                return -errno;

        return 0;
}
int
drm_intel_get_pooled_eu(int fd)
{
        drm_i915_getparam_t gp;
        int ret = -1;

        memclear(gp);
        gp.param = I915_PARAM_HAS_POOLED_EU;
        gp.value = &ret;
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return -errno;

        return ret;
}

int
drm_intel_get_min_eu_in_pool(int fd)
{
        drm_i915_getparam_t gp;
        int ret = -1;

        memclear(gp);
        gp.param = I915_PARAM_MIN_EU_IN_POOL;
        gp.value = &ret;
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return -errno;

        return ret;
}
/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 */
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
                                         drm_intel_aub_annotation *annotations,
                                         unsigned count)
{
}

static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
static drm_intel_bufmgr_gem *
drm_intel_bufmgr_gem_find(int fd)
{
        drm_intel_bufmgr_gem *bufmgr_gem;

        DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
                if (bufmgr_gem->fd == fd) {
                        atomic_inc(&bufmgr_gem->refcount);
                        return bufmgr_gem;
                }
        }

        return NULL;
}
static void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

        if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
                pthread_mutex_lock(&bufmgr_list_mutex);

                if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
                        DRMLISTDEL(&bufmgr_gem->managers);
                        drm_intel_bufmgr_gem_destroy(bufmgr);
                }

                pthread_mutex_unlock(&bufmgr_list_mutex);
        }
}
void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        if (bo_gem->gtt_virtual)
                return bo_gem->gtt_virtual;

        if (bo_gem->is_userptr)
                return NULL;

        pthread_mutex_lock(&bufmgr_gem->lock);
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;
                void *ptr;

                DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                if (bo_gem->map_count++ == 0)
                        drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;

                /* Get the fake offset back... */
                ptr = MAP_FAILED;
                if (drmIoctl(bufmgr_gem->fd,
                             DRM_IOCTL_I915_GEM_MMAP_GTT,
                             &mmap_arg) == 0)
                        ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, bufmgr_gem->fd,
                                       mmap_arg.offset);
                if (ptr == MAP_FAILED) {
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        ptr = NULL;
                }

                bo_gem->gtt_virtual = ptr;
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return bo_gem->gtt_virtual;
}
void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        if (bo_gem->mem_virtual)
                return bo_gem->mem_virtual;

        if (bo_gem->is_userptr) {
                /* Return the same user ptr */
                return bo_gem->user_virtual;
        }

        pthread_mutex_lock(&bufmgr_gem->lock);
        if (!bo_gem->mem_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                if (bo_gem->map_count++ == 0)
                        drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.size = bo->size;
                if (drmIoctl(bufmgr_gem->fd,
                             DRM_IOCTL_I915_GEM_MMAP,
                             &mmap_arg)) {
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                } else {
                        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
                        bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
                }
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return bo_gem->mem_virtual;
}
void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        if (bo_gem->wc_virtual)
                return bo_gem->wc_virtual;

        if (bo_gem->is_userptr)
                return NULL;

        pthread_mutex_lock(&bufmgr_gem->lock);
        if (!bo_gem->wc_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                if (bo_gem->map_count++ == 0)
                        drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.size = bo->size;
                mmap_arg.flags = I915_MMAP_WC;
                if (drmIoctl(bufmgr_gem->fd,
                             DRM_IOCTL_I915_GEM_MMAP,
                             &mmap_arg)) {
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                } else {
                        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
                        bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
                }
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);

        return bo_gem->wc_virtual;
}
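/*
 * Illustrative sketch (not part of the original source): choosing a mapping
 * domain.  CPU maps are cacheable (fast reads, but need flushing on non-LLC
 * parts), GTT maps go through the aperture with tiling fences applied, and
 * WC maps are linear but write-combined; `bo` is hypothetical.
 *
 *      uint32_t *ptr = drm_intel_gem_bo_map__wc(bo);
 *
 *      if (ptr == NULL)
 *              ptr = drm_intel_gem_bo_map__gtt(bo);    // fallback
 *      if (ptr)
 *              ptr[0] = 0xdeadbeef;    // streaming write
 */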
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
        drm_intel_bufmgr_gem *bufmgr_gem;
        struct drm_i915_gem_get_aperture aperture;
        drm_i915_getparam_t gp;
        int ret, tmp;
        bool exec2 = false;

        pthread_mutex_lock(&bufmgr_list_mutex);

        bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
        if (bufmgr_gem)
                goto exit;

        bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
        if (bufmgr_gem == NULL)
                goto exit;

        bufmgr_gem->fd = fd;
        atomic_set(&bufmgr_gem->refcount, 1);

        if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
                free(bufmgr_gem);
                bufmgr_gem = NULL;
                goto exit;
        }

        memclear(aperture);
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_GET_APERTURE,
                       &aperture);

        if (ret == 0)
                bufmgr_gem->gtt_size = aperture.aper_available_size;
        else {
                fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
                        strerror(errno));
                bufmgr_gem->gtt_size = 128 * 1024 * 1024;
                fprintf(stderr, "Assuming %dkB available aperture size.\n"
                        "May lead to reduced performance or incorrect "
                        "rendering.\n",
                        (int)bufmgr_gem->gtt_size / 1024);
        }

        bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
        if (IS_GEN2(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 2;
        else if (IS_GEN3(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 3;
        else if (IS_GEN4(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 4;
        else if (IS_GEN5(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 5;
        else if (IS_GEN6(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 6;
        else if (IS_GEN7(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 7;
        else if (IS_GEN8(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 8;
        else if (IS_GEN9(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 9;
        else {
                free(bufmgr_gem);
                bufmgr_gem = NULL;
                goto exit;
        }

        if (IS_GEN3(bufmgr_gem->pci_device) &&
            bufmgr_gem->gtt_size > 256*1024*1024) {
                /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
                 * be used for tiled blits.  To simplify the accounting, just
                 * subtract the unmappable part (fixed to 256MB on all known
                 * gen3 devices) if the kernel advertises it. */
                bufmgr_gem->gtt_size -= 256*1024*1024;
        }
        memclear(gp);
        gp.value = &tmp;

        gp.param = I915_PARAM_HAS_EXECBUF2;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (!ret)
                exec2 = true;

        gp.param = I915_PARAM_HAS_BSD;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr_gem->has_bsd = ret == 0;

        gp.param = I915_PARAM_HAS_BLT;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr_gem->has_blt = ret == 0;

        gp.param = I915_PARAM_HAS_RELAXED_FENCING;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr_gem->has_relaxed_fencing = ret == 0;

        bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;

        gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr_gem->has_wait_timeout = ret == 0;

        gp.param = I915_PARAM_HAS_LLC;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret != 0) {
                /* Kernel does not support the HAS_LLC query; fall back to GPU
                 * generation detection and assume that we have LLC on GEN6/7.
                 */
                bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
                                       IS_GEN7(bufmgr_gem->pci_device));
        } else
                bufmgr_gem->has_llc = *gp.value;

        gp.param = I915_PARAM_HAS_VEBOX;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);

        gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret == 0 && *gp.value > 0)
                bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
        if (bufmgr_gem->gen < 4) {
                gp.param = I915_PARAM_NUM_FENCES_AVAIL;
                gp.value = &bufmgr_gem->available_fences;
                ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
                if (ret) {
                        fprintf(stderr, "get fences failed: %d [%d]\n", ret,
                                errno);
                        fprintf(stderr, "param: %d, val: %d\n", gp.param,
                                *gp.value);
                        bufmgr_gem->available_fences = 0;
                } else {
                        /* XXX The kernel reports the total number of fences,
                         * including any that may be pinned.
                         *
                         * We presume that there will be at least one pinned
                         * fence for the scanout buffer, but there may be more
                         * than one scanout and the user may be manually
                         * pinning buffers. Let's move to execbuffer2 and
                         * thereby forget the insanity of using fences...
                         */
                        bufmgr_gem->available_fences -= 2;
                        if (bufmgr_gem->available_fences < 0)
                                bufmgr_gem->available_fences = 0;
                }
        }

        if (bufmgr_gem->gen >= 8) {
                gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
                ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
                if (ret == 0 && *gp.value == 3)
                        bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
        }
        /* Let's go with one relocation per every 2 dwords (but round down a bit
         * since a power of two will mean an extra page allocation for the reloc
         * buffer).
         *
         * Every 4 was too few for the blender benchmark.
         */
        bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
        bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
        bufmgr_gem->bufmgr.bo_alloc_for_render =
            drm_intel_gem_bo_alloc_for_render;
        bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
        bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
        bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
        bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
        bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
        bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
        bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
        bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
        bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
        bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
        bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
        bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
        bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
        bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
        bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
        /* Use the new one if available */
        if (exec2) {
                bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
                bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
        } else
                bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
        bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
        bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
        bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
        bufmgr_gem->bufmgr.debug = 0;
        bufmgr_gem->bufmgr.check_aperture_space =
            drm_intel_gem_check_aperture_space;
        bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
        bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
        bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
            drm_intel_gem_get_pipe_from_crtc_id;
        bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

        DRMINITLISTHEAD(&bufmgr_gem->named);
        init_cache_buckets(bufmgr_gem);

        DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
        bufmgr_gem->vma_max = -1; /* unlimited by default */

        DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

exit:
        pthread_mutex_unlock(&bufmgr_list_mutex);

        return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
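/*
 * Illustrative sketch (not part of the original source): the buffer manager
 * is refcounted per fd, so a second init on the same fd returns the existing
 * manager, and destroy is really an unref; `fd` is hypothetical.
 *
 *      drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *      drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, 16 * 1024); // a == b
 *
 *      drm_intel_bufmgr_destroy(b);    // drops one reference
 *      drm_intel_bufmgr_destroy(a);    // frees the manager
 */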