/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <xf86atomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Rounding helpers used below; guarded in case the private header
 * already provides them.
 */
#ifndef ROUND_UP_TO
#define ROUND_UP_TO(x, y)	(((x) + (y) - 1) / (y) * (y))
#endif
#ifndef ALIGN
#define ALIGN(value, alignment)	(((value) + (alignment) - 1) & ~((alignment) - 1))
#endif

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int bo_reuse : 1;
	char fenced_relocs;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	char included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	char used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	char has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	char reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocation descendants.
	 */
	int reloc_tree_fences;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

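/*
 * Worked example (editor's note, not from the original source): on a
 * gen3 part without relaxed fencing, a 300 KiB X-tiled request falls
 * through to the loop above and is rounded up to min_size, 1 MiB. On
 * gen4+ the same request is only page-aligned: ROUND_UP_TO(307200, 4096)
 * == 307200, since 300 KiB is already a whole number of pages.
 */
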
/**
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X)
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
		    realloc(bufmgr_gem->exec2_objects,
			    sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
		size *= 2;

	bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = 1;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

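/*
 * Usage sketch (editor's illustration, not part of the library source):
 * callers typically use busy-ness to choose between touching a buffer
 * now and doing other work first, since mapping a busy buffer stalls.
 * read_back() and queue_for_later() are hypothetical helpers.
 */
#if 0
	if (!drm_intel_bo_busy(bo))
		read_back(bo);		/* safe: the GPU is done with it */
	else
		queue_for_later(bo);	/* avoid stalling on the GPU */
#endif
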
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	int alloc_from_cache;
	unsigned long bo_size;
	int for_render = 0;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = 1;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = 0;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = 1;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = 1;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = 0;
	bo_gem->has_error = 0;
	bo_gem->reusable = 1;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

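/*
 * Usage sketch (editor's illustration): clients allocate through the
 * public wrappers below, which funnel into the function above; "batch"
 * and the 4096 values are arbitrary example arguments.
 */
#if 0
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
	/* ... use the buffer ... */
	drm_intel_bo_unreference(bo);	/* may return it to the cache */
#endif
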
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * the BO we need.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault. This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		if (tiling == I915_TILING_NONE)
			aligned_y = ALIGN(y, 2);
		else if (tiling == I915_TILING_X)
			aligned_y = ALIGN(y, 8);
		else if (tiling == I915_TILING_Y)
			aligned_y = ALIGN(y, 32);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}

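/*
 * Worked example (editor's note): for a 1024x768 XRGB surface (cpp = 4)
 * with *tiling_mode = I915_TILING_X on gen4+, stride starts at
 * 1024 * 4 = 4096, already a multiple of the 512-byte X tile width;
 * the height is aligned to 8 rows (768 already is), so size becomes
 * 4096 * 768 bytes and is then rounded up to a page multiple.
 */
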
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = 0;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	if (bo_gem->mem_virtual)
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
	if (bo_gem->gtt_virtual)
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping. Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

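/*
 * Usage sketch (editor's illustration): the classic CPU upload path is
 * map-for-write, memcpy, unmap; "data" and "bytes" are example values.
 */
#if 0
	if (drm_intel_bo_map(bo, 1) == 0) {
		memcpy(bo->virtual, data, bytes);
		drm_intel_bo_unmap(bo);
	}
#endif
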
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
		    bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	int ret = 0;

	if (bo == NULL)
		return 0;

	pthread_mutex_lock(&bufmgr_gem->lock);
	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret;

	if (bo == NULL)
		return 0;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Cause a flush to happen if the buffer's pinned for scanout, so the
	 * results show up in a timely manner.
	 */
	sw_finish.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SW_FINISH,
		       &sw_finish);
	ret = ret == -1 ? -errno : 0;

	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = 1;
		return -ENOMEM;
	}

	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = 0;

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = 0;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = 1;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	}
	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence)
		target_bo_gem->reloc_tree_fences = 1;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (need_fence)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->reloc_count++;

	return 0;
}

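/*
 * Usage sketch (editor's illustration): when writing a batch command
 * that points at another buffer, the caller stores the presumed address
 * in the batch and records a matching relocation; the offsets shown are
 * arbitrary example values.
 */
#if 0
	batch[i] = target_bo->offset + 128;	/* presumed address */
	drm_intel_bo_emit_reloc(batch_bo, i * 4,
				target_bo, 128,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);
#endif
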
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain,
				!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, 1);
}

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
		int need_fence;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);
	}
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (bo_gem->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list. There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int ring_flag)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret, i;

	switch (ring_flag) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list. There are no relocations
	 * pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = ring_flag;
	execbuf.rsvd1 = 0;
	execbuf.rsvd2 = 0;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return drm_intel_gem_bo_mrb_exec2(bo, used,
					  cliprects, num_cliprects, DR4,
					  I915_EXEC_RENDER);
}

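/*
 * Usage sketch (editor's illustration): a complete submission is just
 * relocations followed by exec; "used" is the number of batch bytes
 * actually written, and batch_cmds is a hypothetical local array.
 */
#if 0
	drm_intel_bo_subdata(batch_bo, 0, used, batch_cmds);
	drm_intel_bo_exec(batch_bo, used, NULL, 0, 0);
#endif
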
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;
	int ret;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;
		bo_gem->global_name = flink.name;
		bo_gem->reusable = 0;
	}

	*name = bo_gem->global_name;
	return 0;
}

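/*
 * Usage sketch (editor's illustration): flink and create_from_name are
 * the two halves of cross-process sharing; the global name travels over
 * whatever IPC channel the applications use. send_to_other_process() is
 * a hypothetical helper.
 */
#if 0
	/* exporting process */
	uint32_t name;
	drm_intel_bo_flink(bo, &name);
	send_to_other_process(name);

	/* importing process */
	drm_intel_bo *shared =
		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
#endif
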
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = 1;
}

/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = 1;
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = 1;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = 0;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}

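/*
 * Usage sketch (editor's illustration): batchbuffer code calls this
 * before adding another draw that references new buffers, and flushes
 * when the set would overflow; flush_batch() is a hypothetical helper.
 */
#if 0
	if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0)
		flush_batch();	/* submit now, retry with an empty batch */
#endif
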
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = 0;
	return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}

static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough. (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}

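/*
 * Worked example (editor's note): the buckets above come out as 4 KiB,
 * 8 KiB, 12 KiB, then for each power of two from 16 KiB to 64 MiB the
 * size itself plus 1.25x, 1.5x and 1.75x steps: 16, 20, 24, 28, 32,
 * 40, 48, 56, 64 KiB and so on -- 55 buckets in total, fitting the
 * 14 * 4 = 56 entry cache_bucket array.
 */
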
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Assumed size of the batchbuffer, in bytes.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret;
	int exec2 = 0;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	if (IS_GEN2(bufmgr_gem))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem))
		bufmgr_gem->gen = 4;
	else
		bufmgr_gem->gen = 6;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = 1;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	init_cache_buckets(bufmgr_gem);

	return &bufmgr_gem->bufmgr;
}

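/*
 * Usage sketch (editor's illustration): typical client setup; 4096 is an
 * example batch size, and buffer reuse is opt-in via enable_reuse.
 */
#if 0
	int fd = drmOpen("i915", NULL);
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
#endif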