1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
56 #include "libdrm_lists.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
64 #define DBG(...) do { \
65 if (bufmgr_gem->bufmgr.debug) \
66 fprintf(stderr, __VA_ARGS__); \
69 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
71 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
73 struct drm_intel_gem_bo_bucket {
78 typedef struct _drm_intel_bufmgr_gem {
79 drm_intel_bufmgr bufmgr;
87 struct drm_i915_gem_exec_object *exec_objects;
88 struct drm_i915_gem_exec_object2 *exec2_objects;
89 drm_intel_bo **exec_bos;
93 /** Array of lists of cached gem objects of power-of-two sizes */
94 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
101 int available_fences;
104 unsigned int has_bsd : 1;
105 unsigned int has_blt : 1;
106 unsigned int has_relaxed_fencing : 1;
107 unsigned int bo_reuse : 1;
109 } drm_intel_bufmgr_gem;
111 #define DRM_INTEL_RELOC_FENCE (1<<0)
113 typedef struct _drm_intel_reloc_target_info {
116 } drm_intel_reloc_target;
118 struct _drm_intel_bo_gem {
126 * Kernel-assigned global name for this object
128 unsigned int global_name;
129 drmMMListHead name_list;
132 * Index of the buffer within the validation list while preparing a
133 * batchbuffer execution.
138 * Current tiling mode
140 uint32_t tiling_mode;
141 uint32_t swizzle_mode;
142 unsigned long stride;
146 /** Array passed to the DRM containing relocation information. */
147 struct drm_i915_gem_relocation_entry *relocs;
149 * Array of info structs corresponding to relocs[i].target_handle etc
151 drm_intel_reloc_target *reloc_target_info;
152 /** Number of entries in relocs */
154 /** Mapped address for the buffer, saved across map/unmap cycles */
156 /** GTT virtual address for the buffer, saved across map/unmap cycles */
163 * Boolean of whether this BO and its children have been included in
164 * the current drm_intel_bufmgr_check_aperture_space() total.
166 char included_in_check_aperture;
169 * Boolean of whether this buffer has been used as a relocation
170 * target and had its size accounted for, and thus can't have any
171 * further relocations added to it.
173 char used_as_reloc_target;
176 * Boolean of whether we have encountered an error whilst building the relocation tree.
181 * Boolean of whether this buffer can be re-used
186 * Size in bytes of this buffer and its relocation descendants.
188 * Used to avoid costly tree walking in
189 * drm_intel_bufmgr_check_aperture in the common case.
194 * Number of potential fence registers required by this buffer and its
197 int reloc_tree_fences;
201 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
204 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
207 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
208 uint32_t * swizzle_mode);
211 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
212 uint32_t tiling_mode,
215 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
218 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
220 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
223 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
224 uint32_t *tiling_mode)
226 unsigned long min_size, max_size;
229 if (*tiling_mode == I915_TILING_NONE)
232 /* 965+ just need multiples of page size for tiling */
233 if (bufmgr_gem->gen >= 4)
234 return ROUND_UP_TO(size, 4096);
236 /* Older chips need powers of two, of at least 512k or 1M */
237 if (bufmgr_gem->gen == 3) {
238 min_size = 1024*1024;
239 max_size = 128*1024*1024;
242 max_size = 64*1024*1024;
245 if (size > max_size) {
246 *tiling_mode = I915_TILING_NONE;
250 /* Do we need to allocate every page for the fence? */
251 if (bufmgr_gem->has_relaxed_fencing)
252 return ROUND_UP_TO(size, 4096);
254 for (i = min_size; i < size; i <<= 1)
261 * Round a given pitch up to the minimum required for X tiling on a
262 * given chip. We use 512 as the minimum to allow for a later tiling
266 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
267 unsigned long pitch, uint32_t *tiling_mode)
269 unsigned long tile_width;
272 /* If untiled, then just align it so that we can do rendering
273 * to it with the 3D engine.
275 if (*tiling_mode == I915_TILING_NONE)
276 return ALIGN(pitch, 64);
278 if (*tiling_mode == I915_TILING_X
279 || (IS_915(bufmgr_gem) && *tiling_mode == I915_TILING_Y))
284 /* 965 is flexible */
285 if (bufmgr_gem->gen >= 4)
286 return ROUND_UP_TO(pitch, tile_width);
288 /* The older hardware has a maximum pitch of 8192 with tiled
289 * surfaces, so fall back to untiled if it's too large.
292 *tiling_mode = I915_TILING_NONE;
293 return ALIGN(pitch, 64);
296 /* Pre-965 needs power of two tile width */
297 for (i = tile_width; i < pitch; i <<= 1)
303 static struct drm_intel_gem_bo_bucket *
304 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
309 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
310 struct drm_intel_gem_bo_bucket *bucket =
311 &bufmgr_gem->cache_bucket[i];
312 if (bucket->size >= size) {
321 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
325 for (i = 0; i < bufmgr_gem->exec_count; i++) {
326 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
327 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
329 if (bo_gem->relocs == NULL) {
330 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
335 for (j = 0; j < bo_gem->reloc_count; j++) {
336 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
337 drm_intel_bo_gem *target_gem =
338 (drm_intel_bo_gem *) target_bo;
340 DBG("%2d: %d (%s)@0x%08llx -> "
341 "%d (%s)@0x%08lx + 0x%08x\n",
343 bo_gem->gem_handle, bo_gem->name,
344 (unsigned long long)bo_gem->relocs[j].offset,
345 target_gem->gem_handle,
348 bo_gem->relocs[j].delta);
354 drm_intel_gem_bo_reference(drm_intel_bo *bo)
356 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
358 atomic_inc(&bo_gem->refcount);
362 * Adds the given buffer to the list of buffers to be validated (moved into the
363 * appropriate memory type) with the next batch submission.
365 * If a buffer is validated multiple times in a batch submission, it ends up
366 * with the intersection of the memory type flags and the union of the
370 drm_intel_add_validate_buffer(drm_intel_bo *bo)
372 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
373 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
376 if (bo_gem->validate_index != -1)
379 /* Extend the array of validation entries as necessary. */
380 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
381 int new_size = bufmgr_gem->exec_size * 2;
386 bufmgr_gem->exec_objects =
387 realloc(bufmgr_gem->exec_objects,
388 sizeof(*bufmgr_gem->exec_objects) * new_size);
389 bufmgr_gem->exec_bos =
390 realloc(bufmgr_gem->exec_bos,
391 sizeof(*bufmgr_gem->exec_bos) * new_size);
392 bufmgr_gem->exec_size = new_size;
395 index = bufmgr_gem->exec_count;
396 bo_gem->validate_index = index;
397 /* Fill in array entry */
398 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
399 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
400 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
401 bufmgr_gem->exec_objects[index].alignment = 0;
402 bufmgr_gem->exec_objects[index].offset = 0;
403 bufmgr_gem->exec_bos[index] = bo;
404 bufmgr_gem->exec_count++;
408 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
410 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
411 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
414 if (bo_gem->validate_index != -1) {
416 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
417 EXEC_OBJECT_NEEDS_FENCE;
421 /* Extend the array of validation entries as necessary. */
422 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
423 int new_size = bufmgr_gem->exec_size * 2;
428 bufmgr_gem->exec2_objects =
429 realloc(bufmgr_gem->exec2_objects,
430 sizeof(*bufmgr_gem->exec2_objects) * new_size);
431 bufmgr_gem->exec_bos =
432 realloc(bufmgr_gem->exec_bos,
433 sizeof(*bufmgr_gem->exec_bos) * new_size);
434 bufmgr_gem->exec_size = new_size;
437 index = bufmgr_gem->exec_count;
438 bo_gem->validate_index = index;
439 /* Fill in array entry */
440 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
441 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
442 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
443 bufmgr_gem->exec2_objects[index].alignment = 0;
444 bufmgr_gem->exec2_objects[index].offset = 0;
445 bufmgr_gem->exec_bos[index] = bo;
446 bufmgr_gem->exec2_objects[index].flags = 0;
447 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
448 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
450 bufmgr_gem->exec2_objects[index].flags |=
451 EXEC_OBJECT_NEEDS_FENCE;
453 bufmgr_gem->exec_count++;
456 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
460 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
461 drm_intel_bo_gem *bo_gem)
465 assert(!bo_gem->used_as_reloc_target);
467 * The older chipsets are far less flexible in terms of tiling,
468 * and require tiled buffers to be size-aligned in the aperture.
469 * This means that in the worst possible case we will need a hole
470 * twice as large as the object in order for it to fit into the
471 * aperture. Optimal packing is for wimps.
473 size = bo_gem->bo.size;
474 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
477 if (bufmgr_gem->has_relaxed_fencing) {
478 if (bufmgr_gem->gen == 3)
479 min_size = 1024*1024;
483 while (min_size < size)
488 /* Account for worst-case alignment. */
492 bo_gem->reloc_tree_size = size;
496 drm_intel_setup_reloc_list(drm_intel_bo *bo)
498 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
499 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
500 unsigned int max_relocs = bufmgr_gem->max_relocs;
502 if (bo->size / 4 < max_relocs)
503 max_relocs = bo->size / 4;
505 bo_gem->relocs = malloc(max_relocs *
506 sizeof(struct drm_i915_gem_relocation_entry));
507 bo_gem->reloc_target_info = malloc(max_relocs *
508 sizeof(drm_intel_reloc_target));
509 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
510 bo_gem->has_error = 1;
512 free (bo_gem->relocs);
513 bo_gem->relocs = NULL;
515 free (bo_gem->reloc_target_info);
516 bo_gem->reloc_target_info = NULL;
525 drm_intel_gem_bo_busy(drm_intel_bo *bo)
527 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
528 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
529 struct drm_i915_gem_busy busy;
532 memset(&busy, 0, sizeof(busy));
533 busy.handle = bo_gem->gem_handle;
535 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
537 return (ret == 0 && busy.busy);
541 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
542 drm_intel_bo_gem *bo_gem, int state)
544 struct drm_i915_gem_madvise madv;
546 madv.handle = bo_gem->gem_handle;
549 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
551 return madv.retained;
555 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
557 return drm_intel_gem_bo_madvise_internal
558 ((drm_intel_bufmgr_gem *) bo->bufmgr,
559 (drm_intel_bo_gem *) bo,
563 /* drop the oldest entries that have been purged by the kernel */
565 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
566 struct drm_intel_gem_bo_bucket *bucket)
568 while (!DRMLISTEMPTY(&bucket->head)) {
569 drm_intel_bo_gem *bo_gem;
571 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
572 bucket->head.next, head);
573 if (drm_intel_gem_bo_madvise_internal
574 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
577 DRMLISTDEL(&bo_gem->head);
578 drm_intel_gem_bo_free(&bo_gem->bo);
582 static drm_intel_bo *
583 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
587 uint32_t tiling_mode,
588 unsigned long stride)
590 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
591 drm_intel_bo_gem *bo_gem;
592 unsigned int page_size = getpagesize();
594 struct drm_intel_gem_bo_bucket *bucket;
595 int alloc_from_cache;
596 unsigned long bo_size;
599 if (flags & BO_ALLOC_FOR_RENDER)
602 /* Round the allocated size up to a power of two number of pages. */
603 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
605 /* If we don't have caching at this size, don't actually round the
608 if (bucket == NULL) {
610 if (bo_size < page_size)
613 bo_size = bucket->size;
616 pthread_mutex_lock(&bufmgr_gem->lock);
617 /* Get a buffer out of the cache if available */
619 alloc_from_cache = 0;
620 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
622 /* Allocate new render-target BOs from the tail (MRU)
623 * of the list, as it will likely be hot in the GPU
624 * cache and in the aperture for us.
626 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
627 bucket->head.prev, head);
628 DRMLISTDEL(&bo_gem->head);
629 alloc_from_cache = 1;
631 /* For non-render-target BOs (where we're probably
632 * going to map them first thing in order to fill them
633 * with data), check if the last BO in the cache is
634 * unbusy, and only reuse in that case. Otherwise,
635 * allocating a new buffer is probably faster than
636 * waiting for the GPU to finish.
638 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
639 bucket->head.next, head);
640 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
641 alloc_from_cache = 1;
642 DRMLISTDEL(&bo_gem->head);
646 if (alloc_from_cache) {
647 if (!drm_intel_gem_bo_madvise_internal
648 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
649 drm_intel_gem_bo_free(&bo_gem->bo);
650 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
655 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
658 drm_intel_gem_bo_free(&bo_gem->bo);
663 pthread_mutex_unlock(&bufmgr_gem->lock);
665 if (!alloc_from_cache) {
666 struct drm_i915_gem_create create;
668 bo_gem = calloc(1, sizeof(*bo_gem));
672 bo_gem->bo.size = bo_size;
673 memset(&create, 0, sizeof(create));
674 create.size = bo_size;
676 ret = drmIoctl(bufmgr_gem->fd,
677 DRM_IOCTL_I915_GEM_CREATE,
679 bo_gem->gem_handle = create.handle;
680 bo_gem->bo.handle = bo_gem->gem_handle;
685 bo_gem->bo.bufmgr = bufmgr;
687 bo_gem->tiling_mode = I915_TILING_NONE;
688 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
691 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
694 drm_intel_gem_bo_free(&bo_gem->bo);
698 DRMINITLISTHEAD(&bo_gem->name_list);
702 atomic_set(&bo_gem->refcount, 1);
703 bo_gem->validate_index = -1;
704 bo_gem->reloc_tree_fences = 0;
705 bo_gem->used_as_reloc_target = 0;
706 bo_gem->has_error = 0;
707 bo_gem->reusable = 1;
709 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
711 DBG("bo_create: buf %d (%s) %ldb\n",
712 bo_gem->gem_handle, bo_gem->name, size);
717 static drm_intel_bo *
718 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
721 unsigned int alignment)
723 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
725 I915_TILING_NONE, 0);
728 static drm_intel_bo *
729 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
732 unsigned int alignment)
734 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
735 I915_TILING_NONE, 0);
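/*
 * Illustrative caller-side sketch (not part of the original file): allocating
 * a linear BO through the public wrapper declared in intel_bufmgr.h.  With
 * reuse enabled, the request is rounded up to the matching cache-bucket size
 * above, so bo->size may be larger than the number of bytes asked for.  The
 * buffer name and sizes here are arbitrary.
 */
static drm_intel_bo *
example_alloc_linear(drm_intel_bufmgr *bufmgr)
{
	/* 64kB of vertex data, 4kB alignment; both values are arbitrary. */
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "example vertices",
					      64 * 1024, 4096);
	if (bo == NULL)
		return NULL;

	/* bo->size reflects the (possibly bucket-rounded) allocation. */
	return bo;
}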
738 static drm_intel_bo *
739 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
740 int x, int y, int cpp, uint32_t *tiling_mode,
741 unsigned long *pitch, unsigned long flags)
743 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
744 unsigned long size, stride;
748 unsigned long aligned_y, height_alignment;
750 tiling = *tiling_mode;
752 /* If we're tiled, our allocations are in 8 or 32-row blocks,
753 * so failure to align our height means that we won't allocate
756 * If we're untiled, we still have to align to 2 rows high
757 * because the data port accesses 2x2 blocks even if the
758 * bottom row isn't to be rendered, so failure to align means
759 * we could walk off the end of the GTT and fault. This is
760 * documented on 965, and may be the case on older chipsets
761 * too so we try to be careful.
764 height_alignment = 2;
766 if (IS_GEN2(bufmgr_gem) && tiling != I915_TILING_NONE)
767 height_alignment = 16;
768 else if (tiling == I915_TILING_X
769 || (IS_915(bufmgr_gem) && tiling == I915_TILING_Y))
770 height_alignment = 8;
771 else if (tiling == I915_TILING_Y)
772 height_alignment = 32;
773 aligned_y = ALIGN(y, height_alignment);
776 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
777 size = stride * aligned_y;
778 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
779 } while (*tiling_mode != tiling);
782 if (tiling == I915_TILING_NONE)
785 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
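/*
 * Illustrative sketch (not part of the original file): allocating an X-tiled
 * render target with the public drm_intel_bo_alloc_tiled() wrapper.
 * tiling_mode and pitch are in/out parameters: the implementation above may
 * fall back to I915_TILING_NONE (for example, an oversized pitch on pre-965),
 * so the caller must re-check them after the call.  Dimensions are arbitrary.
 */
static drm_intel_bo *
example_alloc_tiled(drm_intel_bufmgr *bufmgr)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch = 0;
	drm_intel_bo *bo;

	bo = drm_intel_bo_alloc_tiled(bufmgr, "example render target",
				      1024, 768, 4, /* width, height, cpp */
				      &tiling, &pitch,
				      BO_ALLOC_FOR_RENDER);
	if (bo == NULL)
		return NULL;

	if (tiling != I915_TILING_X) {
		/* The request was not honoured; treat the buffer as linear. */
	}
	/* pitch now holds the stride actually chosen by the allocator. */
	return bo;
}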
790 * Returns a drm_intel_bo wrapping the given buffer object handle.
792 * This can be used when one application needs to pass a buffer object
796 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
800 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
801 drm_intel_bo_gem *bo_gem;
803 struct drm_gem_open open_arg;
804 struct drm_i915_gem_get_tiling get_tiling;
807 /* At the moment most applications only have a few named BOs.
808 * For instance, in a DRI client only the render buffers passed
809 * between X and the client are named. And since X returns the
810 * alternating names for the front/back buffer a linear search
811 * provides a sufficiently fast match.
813 for (list = bufmgr_gem->named.next;
814 list != &bufmgr_gem->named;
816 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
817 if (bo_gem->global_name == handle) {
818 drm_intel_gem_bo_reference(&bo_gem->bo);
823 bo_gem = calloc(1, sizeof(*bo_gem));
827 memset(&open_arg, 0, sizeof(open_arg));
828 open_arg.name = handle;
829 ret = drmIoctl(bufmgr_gem->fd,
833 DBG("Couldn't reference %s handle 0x%08x: %s\n",
834 name, handle, strerror(errno));
838 bo_gem->bo.size = open_arg.size;
839 bo_gem->bo.offset = 0;
840 bo_gem->bo.virtual = NULL;
841 bo_gem->bo.bufmgr = bufmgr;
843 atomic_set(&bo_gem->refcount, 1);
844 bo_gem->validate_index = -1;
845 bo_gem->gem_handle = open_arg.handle;
846 bo_gem->bo.handle = open_arg.handle;
847 bo_gem->global_name = handle;
848 bo_gem->reusable = 0;
850 memset(&get_tiling, 0, sizeof(get_tiling));
851 get_tiling.handle = bo_gem->gem_handle;
852 ret = drmIoctl(bufmgr_gem->fd,
853 DRM_IOCTL_I915_GEM_GET_TILING,
856 drm_intel_gem_bo_unreference(&bo_gem->bo);
859 bo_gem->tiling_mode = get_tiling.tiling_mode;
860 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
861 /* XXX stride is unknown */
862 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
864 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
865 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
871 drm_intel_gem_bo_free(drm_intel_bo *bo)
873 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
874 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
875 struct drm_gem_close close;
878 if (bo_gem->mem_virtual)
879 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
880 if (bo_gem->gtt_virtual)
881 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
883 /* Close this object */
884 memset(&close, 0, sizeof(close));
885 close.handle = bo_gem->gem_handle;
886 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
888 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
889 bo_gem->gem_handle, bo_gem->name, strerror(errno));
894 /** Frees all cached buffers significantly older than @time. */
896 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
900 if (bufmgr_gem->time == time)
903 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
904 struct drm_intel_gem_bo_bucket *bucket =
905 &bufmgr_gem->cache_bucket[i];
907 while (!DRMLISTEMPTY(&bucket->head)) {
908 drm_intel_bo_gem *bo_gem;
910 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
911 bucket->head.next, head);
912 if (time - bo_gem->free_time <= 1)
915 DRMLISTDEL(&bo_gem->head);
917 drm_intel_gem_bo_free(&bo_gem->bo);
921 bufmgr_gem->time = time;
925 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
927 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
928 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
929 struct drm_intel_gem_bo_bucket *bucket;
932 /* Unreference all the target buffers */
933 for (i = 0; i < bo_gem->reloc_count; i++) {
934 if (bo_gem->reloc_target_info[i].bo != bo) {
935 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
936 reloc_target_info[i].bo,
940 bo_gem->reloc_count = 0;
941 bo_gem->used_as_reloc_target = 0;
943 DBG("bo_unreference final: %d (%s)\n",
944 bo_gem->gem_handle, bo_gem->name);
946 /* release memory associated with this object */
947 if (bo_gem->reloc_target_info) {
948 free(bo_gem->reloc_target_info);
949 bo_gem->reloc_target_info = NULL;
951 if (bo_gem->relocs) {
952 free(bo_gem->relocs);
953 bo_gem->relocs = NULL;
956 DRMLISTDEL(&bo_gem->name_list);
958 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
959 /* Put the buffer into our internal cache for reuse if we can. */
960 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
961 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
962 I915_MADV_DONTNEED)) {
963 bo_gem->free_time = time;
966 bo_gem->validate_index = -1;
968 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
970 drm_intel_gem_bo_free(bo);
974 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
977 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
979 assert(atomic_read(&bo_gem->refcount) > 0);
980 if (atomic_dec_and_test(&bo_gem->refcount))
981 drm_intel_gem_bo_unreference_final(bo, time);
984 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
986 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
988 assert(atomic_read(&bo_gem->refcount) > 0);
989 if (atomic_dec_and_test(&bo_gem->refcount)) {
990 drm_intel_bufmgr_gem *bufmgr_gem =
991 (drm_intel_bufmgr_gem *) bo->bufmgr;
992 struct timespec time;
994 clock_gettime(CLOCK_MONOTONIC, &time);
996 pthread_mutex_lock(&bufmgr_gem->lock);
997 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
998 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
999 pthread_mutex_unlock(&bufmgr_gem->lock);
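/*
 * Illustrative sketch (not part of the original file): the reference-counting
 * contract.  Any code that stores a BO pointer beyond the current call takes
 * a reference with drm_intel_bo_reference() and drops it again with
 * drm_intel_bo_unreference(); the final unreference returns the buffer to the
 * cache (or frees it) as implemented above.  The wrapper struct is
 * hypothetical.
 */
struct example_texture {
	drm_intel_bo *bo;	/* hypothetical caller-side wrapper */
};

static void
example_texture_set_bo(struct example_texture *tex, drm_intel_bo *bo)
{
	drm_intel_bo_reference(bo);		/* for the pointer we keep */
	if (tex->bo != NULL)
		drm_intel_bo_unreference(tex->bo);
	tex->bo = bo;
}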
1003 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1005 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1006 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1007 struct drm_i915_gem_set_domain set_domain;
1010 pthread_mutex_lock(&bufmgr_gem->lock);
1012 /* Allow recursive mapping. Mesa may recursively map buffers with
1013 * nested display loops.
1015 if (!bo_gem->mem_virtual) {
1016 struct drm_i915_gem_mmap mmap_arg;
1018 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
1020 memset(&mmap_arg, 0, sizeof(mmap_arg));
1021 mmap_arg.handle = bo_gem->gem_handle;
1022 mmap_arg.offset = 0;
1023 mmap_arg.size = bo->size;
1024 ret = drmIoctl(bufmgr_gem->fd,
1025 DRM_IOCTL_I915_GEM_MMAP,
1029 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1030 __FILE__, __LINE__, bo_gem->gem_handle,
1031 bo_gem->name, strerror(errno));
1032 pthread_mutex_unlock(&bufmgr_gem->lock);
1035 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1037 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1038 bo_gem->mem_virtual);
1039 bo->virtual = bo_gem->mem_virtual;
1041 set_domain.handle = bo_gem->gem_handle;
1042 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1044 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1046 set_domain.write_domain = 0;
1047 ret = drmIoctl(bufmgr_gem->fd,
1048 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1051 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1052 __FILE__, __LINE__, bo_gem->gem_handle,
1056 pthread_mutex_unlock(&bufmgr_gem->lock);
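/*
 * Illustrative sketch (not part of the original file): filling a BO through
 * the CPU mmap path wrapped by drm_intel_bo_map() above.  write_enable = 1
 * moves the buffer to the CPU write domain so the GPU sees the new data on
 * the next execbuffer.  Assumes <string.h> for memcpy(), as in the full file.
 */
static int
example_fill_bo_cpu(drm_intel_bo *bo, const void *data, unsigned long size)
{
	int ret;

	ret = drm_intel_bo_map(bo, 1 /* write_enable */);
	if (ret != 0)
		return ret;

	memcpy(bo->virtual, data, size);	/* size must be <= bo->size */

	return drm_intel_bo_unmap(bo);
}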
1061 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1063 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1064 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1065 struct drm_i915_gem_set_domain set_domain;
1068 pthread_mutex_lock(&bufmgr_gem->lock);
1070 /* Get a mapping of the buffer if we haven't before. */
1071 if (bo_gem->gtt_virtual == NULL) {
1072 struct drm_i915_gem_mmap_gtt mmap_arg;
1074 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
1077 memset(&mmap_arg, 0, sizeof(mmap_arg));
1078 mmap_arg.handle = bo_gem->gem_handle;
1080 /* Get the fake offset back... */
1081 ret = drmIoctl(bufmgr_gem->fd,
1082 DRM_IOCTL_I915_GEM_MMAP_GTT,
1086 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1088 bo_gem->gem_handle, bo_gem->name,
1090 pthread_mutex_unlock(&bufmgr_gem->lock);
1095 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1096 MAP_SHARED, bufmgr_gem->fd,
1098 if (bo_gem->gtt_virtual == MAP_FAILED) {
1099 bo_gem->gtt_virtual = NULL;
1101 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1103 bo_gem->gem_handle, bo_gem->name,
1105 pthread_mutex_unlock(&bufmgr_gem->lock);
1110 bo->virtual = bo_gem->gtt_virtual;
1112 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1113 bo_gem->gtt_virtual);
1115 /* Now move it to the GTT domain so that the CPU caches are flushed */
1116 set_domain.handle = bo_gem->gem_handle;
1117 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1118 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1119 ret = drmIoctl(bufmgr_gem->fd,
1120 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1123 DBG("%s:%d: Error setting domain %d: %s\n",
1124 __FILE__, __LINE__, bo_gem->gem_handle,
1128 pthread_mutex_unlock(&bufmgr_gem->lock);
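/*
 * Illustrative sketch (not part of the original file): writing to a tiled
 * buffer through the GTT mapping set up above.  The GTT path applies the
 * tiling/fence swizzling in hardware, so the CPU can address the buffer
 * linearly; the mapping is write-combined, so reads back through it are slow.
 */
static int
example_fill_bo_gtt(drm_intel_bo *bo, const void *data, unsigned long size)
{
	int ret;

	ret = drm_intel_gem_bo_map_gtt(bo);
	if (ret != 0)
		return ret;

	memcpy(bo->virtual, data, size);

	return drm_intel_gem_bo_unmap_gtt(bo);
}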
1133 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1135 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1141 pthread_mutex_lock(&bufmgr_gem->lock);
1143 pthread_mutex_unlock(&bufmgr_gem->lock);
1148 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1150 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1151 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1152 struct drm_i915_gem_sw_finish sw_finish;
1158 pthread_mutex_lock(&bufmgr_gem->lock);
1160 /* Cause a flush to happen if the buffer's pinned for scanout, so the
1161 * results show up in a timely manner.
1163 sw_finish.handle = bo_gem->gem_handle;
1164 ret = drmIoctl(bufmgr_gem->fd,
1165 DRM_IOCTL_I915_GEM_SW_FINISH,
1167 ret = ret == -1 ? -errno : 0;
1170 pthread_mutex_unlock(&bufmgr_gem->lock);
1176 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1177 unsigned long size, const void *data)
1179 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1180 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1181 struct drm_i915_gem_pwrite pwrite;
1184 memset(&pwrite, 0, sizeof(pwrite));
1185 pwrite.handle = bo_gem->gem_handle;
1186 pwrite.offset = offset;
1188 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1189 ret = drmIoctl(bufmgr_gem->fd,
1190 DRM_IOCTL_I915_GEM_PWRITE,
1194 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1195 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1196 (int)size, strerror(errno));
1203 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1205 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1206 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1209 get_pipe_from_crtc_id.crtc_id = crtc_id;
1210 ret = drmIoctl(bufmgr_gem->fd,
1211 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1212 &get_pipe_from_crtc_id);
1214 /* We return -1 here to signal that we don't
1215 * know which pipe is associated with this crtc.
1216 * This lets the caller know that this information
1217 * isn't available; using the wrong pipe for
1218 * vblank waiting can cause the chipset to lock up
1223 return get_pipe_from_crtc_id.pipe;
1227 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1228 unsigned long size, void *data)
1230 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1231 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1232 struct drm_i915_gem_pread pread;
1235 memset(&pread, 0, sizeof(pread));
1236 pread.handle = bo_gem->gem_handle;
1237 pread.offset = offset;
1239 pread.data_ptr = (uint64_t) (uintptr_t) data;
1240 ret = drmIoctl(bufmgr_gem->fd,
1241 DRM_IOCTL_I915_GEM_PREAD,
1245 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1246 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1247 (int)size, strerror(errno));
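/*
 * Illustrative sketch (not part of the original file): pwrite/pread access
 * without mapping, using the drm_intel_bo_subdata() and
 * drm_intel_bo_get_subdata() wrappers over the two ioctls above.  Handy for
 * small one-shot updates where setting up a mapping is not worth it.
 */
static int
example_update_constants(drm_intel_bo *bo, const float consts[4])
{
	/* Write 16 bytes at offset 0 of the buffer. */
	return drm_intel_bo_subdata(bo, 0, 4 * sizeof(float), consts);
}

static int
example_read_back_constants(drm_intel_bo *bo, float consts[4])
{
	return drm_intel_bo_get_subdata(bo, 0, 4 * sizeof(float), consts);
}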
1253 /** Waits for all GPU rendering with the object to have completed. */
1255 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1257 drm_intel_gem_bo_start_gtt_access(bo, 1);
1261 * Sets the object to the GTT read and possibly write domain, used by the X
1262 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1264 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1265 * can do tiled pixmaps this way.
1268 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1270 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1271 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1272 struct drm_i915_gem_set_domain set_domain;
1275 set_domain.handle = bo_gem->gem_handle;
1276 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1277 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1278 ret = drmIoctl(bufmgr_gem->fd,
1279 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1282 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1283 __FILE__, __LINE__, bo_gem->gem_handle,
1284 set_domain.read_domains, set_domain.write_domain,
1290 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1292 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1295 free(bufmgr_gem->exec2_objects);
1296 free(bufmgr_gem->exec_objects);
1297 free(bufmgr_gem->exec_bos);
1299 pthread_mutex_destroy(&bufmgr_gem->lock);
1301 /* Free any cached buffer objects we were going to reuse */
1302 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1303 struct drm_intel_gem_bo_bucket *bucket =
1304 &bufmgr_gem->cache_bucket[i];
1305 drm_intel_bo_gem *bo_gem;
1307 while (!DRMLISTEMPTY(&bucket->head)) {
1308 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1309 bucket->head.next, head);
1310 DRMLISTDEL(&bo_gem->head);
1312 drm_intel_gem_bo_free(&bo_gem->bo);
1320 * Adds the target buffer to the validation list and adds the relocation
1321 * to the reloc_buffer's relocation list.
1323 * The relocation entry at the given offset must already contain the
1324 * precomputed relocation value, because the kernel will optimize out
1325 * the relocation entry write when the buffer hasn't moved from the
1326 * last known offset in target_bo.
1329 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1330 drm_intel_bo *target_bo, uint32_t target_offset,
1331 uint32_t read_domains, uint32_t write_domain,
1334 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1335 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1336 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1339 if (bo_gem->has_error)
1342 if (target_bo_gem->has_error) {
1343 bo_gem->has_error = 1;
1347 /* We never use HW fences for rendering on 965+ */
1348 if (bufmgr_gem->gen >= 4)
1351 fenced_command = need_fence;
1352 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1355 /* Create a new relocation list if needed */
1356 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1359 /* Check overflow */
1360 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1363 assert(offset <= bo->size - 4);
1364 assert((write_domain & (write_domain - 1)) == 0);
1366 /* Make sure that we're not adding a reloc to something whose size has
1367 * already been accounted for.
1369 assert(!bo_gem->used_as_reloc_target);
1370 if (target_bo_gem != bo_gem) {
1371 target_bo_gem->used_as_reloc_target = 1;
1372 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1374 /* An object needing a fence is a tiled buffer, so it won't have
1375 * relocs to other buffers.
1378 target_bo_gem->reloc_tree_fences = 1;
1379 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1381 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1382 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1383 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1384 target_bo_gem->gem_handle;
1385 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1386 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1387 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1389 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1390 if (target_bo != bo)
1391 drm_intel_gem_bo_reference(target_bo);
1393 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1394 DRM_INTEL_RELOC_FENCE;
1396 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1398 bo_gem->reloc_count++;
1404 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1405 drm_intel_bo *target_bo, uint32_t target_offset,
1406 uint32_t read_domains, uint32_t write_domain)
1408 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1410 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1411 read_domains, write_domain,
1412 !bufmgr_gem->fenced_relocs);
1416 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1417 drm_intel_bo *target_bo,
1418 uint32_t target_offset,
1419 uint32_t read_domains, uint32_t write_domain)
1421 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1422 read_domains, write_domain, 1);
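/*
 * Illustrative sketch (not part of the original file): how a driver emits a
 * relocation while building a batch.  The presumed (last-known) offset of the
 * target is written into the batch first, and drm_intel_bo_emit_reloc()
 * records where that dword lives so the kernel can patch it if the target
 * moves.  The batch_map/dword_index bookkeeping here is hypothetical.
 */
static void
example_emit_target_address(drm_intel_bo *batch_bo, uint32_t *batch_map,
			    uint32_t dword_index,
			    drm_intel_bo *target_bo, uint32_t target_offset)
{
	/* Precomputed value: the kernel skips the write if it is still valid. */
	batch_map[dword_index] = target_bo->offset + target_offset;

	drm_intel_bo_emit_reloc(batch_bo,
				dword_index * 4,	/* byte offset in batch */
				target_bo, target_offset,
				I915_GEM_DOMAIN_RENDER,	 /* read domains */
				I915_GEM_DOMAIN_RENDER); /* write domain */
}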
1426 * Walk the tree of relocations rooted at BO and accumulate the list of
1427 * validations to be performed and update the relocation buffers with
1428 * index values into the validation list.
1431 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1433 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1436 if (bo_gem->relocs == NULL)
1439 for (i = 0; i < bo_gem->reloc_count; i++) {
1440 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1442 if (target_bo == bo)
1445 /* Continue walking the tree depth-first. */
1446 drm_intel_gem_bo_process_reloc(target_bo);
1448 /* Add the target to the validate list */
1449 drm_intel_add_validate_buffer(target_bo);
1454 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1456 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1459 if (bo_gem->relocs == NULL)
1462 for (i = 0; i < bo_gem->reloc_count; i++) {
1463 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1466 if (target_bo == bo)
1469 /* Continue walking the tree depth-first. */
1470 drm_intel_gem_bo_process_reloc2(target_bo);
1472 need_fence = (bo_gem->reloc_target_info[i].flags &
1473 DRM_INTEL_RELOC_FENCE);
1475 /* Add the target to the validate list */
1476 drm_intel_add_validate_buffer2(target_bo, need_fence);
1482 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1486 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1487 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1488 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1490 /* Update the buffer offset */
1491 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1492 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1493 bo_gem->gem_handle, bo_gem->name, bo->offset,
1494 (unsigned long long)bufmgr_gem->exec_objects[i].
1496 bo->offset = bufmgr_gem->exec_objects[i].offset;
1502 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1506 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1507 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1508 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1510 /* Update the buffer offset */
1511 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1512 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1513 bo_gem->gem_handle, bo_gem->name, bo->offset,
1514 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1515 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1521 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1522 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1524 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1525 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1526 struct drm_i915_gem_execbuffer execbuf;
1529 if (bo_gem->has_error)
1532 pthread_mutex_lock(&bufmgr_gem->lock);
1533 /* Update indices and set up the validate list. */
1534 drm_intel_gem_bo_process_reloc(bo);
1536 /* Add the batch buffer to the validation list. There are no
1537 * relocations pointing to it.
1539 drm_intel_add_validate_buffer(bo);
1541 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1542 execbuf.buffer_count = bufmgr_gem->exec_count;
1543 execbuf.batch_start_offset = 0;
1544 execbuf.batch_len = used;
1545 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1546 execbuf.num_cliprects = num_cliprects;
1550 ret = drmIoctl(bufmgr_gem->fd,
1551 DRM_IOCTL_I915_GEM_EXECBUFFER,
1555 if (errno == ENOSPC) {
1556 DBG("Execbuffer fails to pin. "
1557 "Estimate: %u. Actual: %u. Available: %u\n",
1558 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1561 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1564 (unsigned int)bufmgr_gem->gtt_size);
1567 drm_intel_update_buffer_offsets(bufmgr_gem);
1569 if (bufmgr_gem->bufmgr.debug)
1570 drm_intel_gem_dump_validation_list(bufmgr_gem);
1572 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1573 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1574 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1576 /* Disconnect the buffer from the validate list */
1577 bo_gem->validate_index = -1;
1578 bufmgr_gem->exec_bos[i] = NULL;
1580 bufmgr_gem->exec_count = 0;
1581 pthread_mutex_unlock(&bufmgr_gem->lock);
1587 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
1588 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
1591 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1592 struct drm_i915_gem_execbuffer2 execbuf;
1595 switch (flags & 0x7) {
1599 if (!bufmgr_gem->has_blt)
1603 if (!bufmgr_gem->has_bsd)
1606 case I915_EXEC_RENDER:
1607 case I915_EXEC_DEFAULT:
1611 pthread_mutex_lock(&bufmgr_gem->lock);
1612 /* Update indices and set up the validate list. */
1613 drm_intel_gem_bo_process_reloc2(bo);
1615 /* Add the batch buffer to the validation list. There are no relocations
1618 drm_intel_add_validate_buffer2(bo, 0);
1620 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1621 execbuf.buffer_count = bufmgr_gem->exec_count;
1622 execbuf.batch_start_offset = 0;
1623 execbuf.batch_len = used;
1624 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1625 execbuf.num_cliprects = num_cliprects;
1628 execbuf.flags = flags;
1632 ret = drmIoctl(bufmgr_gem->fd,
1633 DRM_IOCTL_I915_GEM_EXECBUFFER2,
1637 if (ret == -ENOSPC) {
1638 DBG("Execbuffer fails to pin. "
1639 "Estimate: %u. Actual: %u. Available: %u\n",
1640 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1641 bufmgr_gem->exec_count),
1642 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1643 bufmgr_gem->exec_count),
1644 (unsigned int) bufmgr_gem->gtt_size);
1647 drm_intel_update_buffer_offsets2(bufmgr_gem);
1649 if (bufmgr_gem->bufmgr.debug)
1650 drm_intel_gem_dump_validation_list(bufmgr_gem);
1652 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1653 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1654 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1656 /* Disconnect the buffer from the validate list */
1657 bo_gem->validate_index = -1;
1658 bufmgr_gem->exec_bos[i] = NULL;
1660 bufmgr_gem->exec_count = 0;
1661 pthread_mutex_unlock(&bufmgr_gem->lock);
1667 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1668 drm_clip_rect_t *cliprects, int num_cliprects,
1671 return drm_intel_gem_bo_mrb_exec2(bo, used,
1672 cliprects, num_cliprects, DR4,
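/*
 * Illustrative sketch (not part of the original file): submitting a trivial
 * batch through the public drm_intel_bo_exec() wrapper, which lands in
 * drm_intel_gem_bo_exec2() above when execbuf2 is available.  0x05000000 is
 * the MI_BATCH_BUFFER_END opcode (0xA << 23); padding with MI_NOOP (0) keeps
 * the used length a multiple of 8 bytes.  Assumes <errno.h>, as in the full
 * file.
 */
static int
example_submit_noop_batch(drm_intel_bufmgr *bufmgr)
{
	const uint32_t commands[2] = { 0x05000000 /* MI_BATCH_BUFFER_END */,
				       0 /* MI_NOOP */ };
	drm_intel_bo *batch_bo;
	int ret;

	batch_bo = drm_intel_bo_alloc(bufmgr, "example batch", 4096, 4096);
	if (batch_bo == NULL)
		return -ENOMEM;

	ret = drm_intel_bo_subdata(batch_bo, 0, sizeof(commands), commands);
	if (ret == 0)
		ret = drm_intel_bo_exec(batch_bo, sizeof(commands),
					NULL, 0, 0);

	drm_intel_bo_unreference(batch_bo);
	return ret;
}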
1677 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1679 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1680 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1681 struct drm_i915_gem_pin pin;
1684 memset(&pin, 0, sizeof(pin));
1685 pin.handle = bo_gem->gem_handle;
1686 pin.alignment = alignment;
1688 ret = drmIoctl(bufmgr_gem->fd,
1689 DRM_IOCTL_I915_GEM_PIN,
1694 bo->offset = pin.offset;
1699 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1701 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1702 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1703 struct drm_i915_gem_unpin unpin;
1706 memset(&unpin, 0, sizeof(unpin));
1707 unpin.handle = bo_gem->gem_handle;
1709 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1717 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
1718 uint32_t tiling_mode,
1721 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1722 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1723 struct drm_i915_gem_set_tiling set_tiling;
1726 if (bo_gem->global_name == 0 &&
1727 tiling_mode == bo_gem->tiling_mode &&
1728 stride == bo_gem->stride)
1731 memset(&set_tiling, 0, sizeof(set_tiling));
1733 /* set_tiling is slightly broken and overwrites the
1734 * input on the error path, so we have to open code
1737 set_tiling.handle = bo_gem->gem_handle;
1738 set_tiling.tiling_mode = tiling_mode;
1739 set_tiling.stride = stride;
1741 ret = ioctl(bufmgr_gem->fd,
1742 DRM_IOCTL_I915_GEM_SET_TILING,
1744 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1748 bo_gem->tiling_mode = set_tiling.tiling_mode;
1749 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1750 bo_gem->stride = set_tiling.stride;
1755 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1758 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1759 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1762 /* Linear buffers have no stride. By ensuring that we only ever use
1763 * stride 0 with linear buffers, we simplify our code.
1765 if (*tiling_mode == I915_TILING_NONE)
1768 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
1770 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1772 *tiling_mode = bo_gem->tiling_mode;
1777 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1778 uint32_t * swizzle_mode)
1780 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1782 *tiling_mode = bo_gem->tiling_mode;
1783 *swizzle_mode = bo_gem->swizzle_mode;
1788 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1790 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1791 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1792 struct drm_gem_flink flink;
1795 if (!bo_gem->global_name) {
1796 memset(&flink, 0, sizeof(flink));
1797 flink.handle = bo_gem->gem_handle;
1799 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1802 bo_gem->global_name = flink.name;
1803 bo_gem->reusable = 0;
1805 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1808 *name = bo_gem->global_name;
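/*
 * Illustrative sketch (not part of the original file): sharing a buffer
 * between two processes by global (flink) name.  The exporter publishes the
 * name returned by drm_intel_bo_flink(); the importer wraps it with
 * drm_intel_bo_gem_create_from_name() above.  How the name travels between
 * the processes (DRI protocol, socket, ...) is up to the caller.
 */
static int
example_export_name(drm_intel_bo *bo, uint32_t *name_out)
{
	return drm_intel_bo_flink(bo, name_out);  /* also disables reuse */
}

static drm_intel_bo *
example_import_name(drm_intel_bufmgr *bufmgr, uint32_t name)
{
	return drm_intel_bo_gem_create_from_name(bufmgr, "imported buffer",
						 name);
}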
1813 * Enables unlimited caching of buffer objects for reuse.
1815 * This is potentially very memory expensive, as the cache at each bucket
1816 * size is only bounded by how many buffers of that size we've managed to have
1817 * in flight at once.
1820 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1822 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1824 bufmgr_gem->bo_reuse = 1;
1828 * Enable use of fenced reloc type.
1830 * New code should enable this to avoid unnecessary fence register
1831 * allocation. If this option is not enabled, all relocs will have a fence
1832 * register allocated.
1835 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1837 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
1839 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1840 bufmgr_gem->fenced_relocs = 1;
1844 * Return the additional aperture space required by the tree of buffer objects
1848 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1850 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1854 if (bo == NULL || bo_gem->included_in_check_aperture)
1858 bo_gem->included_in_check_aperture = 1;
1860 for (i = 0; i < bo_gem->reloc_count; i++)
1862 drm_intel_gem_bo_get_aperture_space(bo_gem->
1863 reloc_target_info[i].bo);
1869 * Count the number of buffers in this list that need a fence reg
1871 * If the count is greater than the number of available regs, we'll have
1872 * to ask the caller to resubmit a batch with fewer tiled buffers.
1874 * This function over-counts if the same buffer is used multiple times.
1877 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1880 unsigned int total = 0;
1882 for (i = 0; i < count; i++) {
1883 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1888 total += bo_gem->reloc_tree_fences;
1894 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1895 * for the next drm_intel_bufmgr_check_aperture_space() call.
1898 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1900 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1903 if (bo == NULL || !bo_gem->included_in_check_aperture)
1906 bo_gem->included_in_check_aperture = 0;
1908 for (i = 0; i < bo_gem->reloc_count; i++)
1909 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1910 reloc_target_info[i].bo);
1914 * Return a conservative estimate for the amount of aperture required
1915 * for a collection of buffers. This may double-count some buffers.
1918 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1921 unsigned int total = 0;
1923 for (i = 0; i < count; i++) {
1924 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1926 total += bo_gem->reloc_tree_size;
1932 * Return the amount of aperture needed for a collection of buffers.
1933 * This avoids double counting any buffers, at the cost of looking
1934 * at every buffer in the set.
1937 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1940 unsigned int total = 0;
1942 for (i = 0; i < count; i++) {
1943 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1944 /* For the first buffer object in the array, we get an
1945 * accurate count back for its reloc_tree size (since nothing
1946 * had been flagged as being counted yet). We can save that
1947 * value out as a more conservative reloc_tree_size that
1948 * avoids double-counting target buffers. Since the first
1949 * buffer happens to usually be the batch buffer in our
1950 * callers, this can pull us back from doing the tree
1951 * walk on every new batch emit.
1954 drm_intel_bo_gem *bo_gem =
1955 (drm_intel_bo_gem *) bo_array[i];
1956 bo_gem->reloc_tree_size = total;
1960 for (i = 0; i < count; i++)
1961 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1966 * Return -1 if the batchbuffer should be flushed before attempting to
1967 * emit rendering referencing the buffers pointed to by bo_array.
1969 * This is required because if we try to emit a batchbuffer with relocations
1970 * to a tree of buffers that won't simultaneously fit in the aperture,
1971 * the rendering will return an error at a point where the software is not
1972 * prepared to recover from it.
1974 * However, we also want to emit the batchbuffer significantly before we reach
1975 * the limit, as a series of batchbuffers each of which references buffers
1976 * covering almost all of the aperture means that at each emit we end up
1977 * waiting to evict a buffer from the last rendering, and we get synchronous
1978 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
1979 * get better parallelism.
1982 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1984 drm_intel_bufmgr_gem *bufmgr_gem =
1985 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1986 unsigned int total = 0;
1987 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1990 /* Check for fence reg constraints if necessary */
1991 if (bufmgr_gem->available_fences) {
1992 total_fences = drm_intel_gem_total_fences(bo_array, count);
1993 if (total_fences > bufmgr_gem->available_fences)
1997 total = drm_intel_gem_estimate_batch_space(bo_array, count);
1999 if (total > threshold)
2000 total = drm_intel_gem_compute_batch_space(bo_array, count);
2002 if (total > threshold) {
2003 DBG("check_space: overflowed available aperture, "
2005 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2008 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2009 (int)bufmgr_gem->gtt_size / 1024);
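/*
 * Illustrative sketch (not part of the original file): the flush-early
 * pattern described above.  Before emitting state that references new
 * buffers, the driver asks whether everything referenced so far plus the new
 * buffer still fits comfortably; if not, it submits the current batch and
 * starts a fresh one.  example_flush_batch() is a hypothetical helper.
 */
static void example_flush_batch(void);	/* hypothetical: submit + reset batch */

static void
example_require_space(drm_intel_bo *batch_bo, drm_intel_bo *new_bo)
{
	drm_intel_bo *check[2];

	check[0] = batch_bo;
	check[1] = new_bo;

	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0)
		example_flush_batch();
}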
2015 * Disable buffer reuse for objects which are shared with the kernel
2016 * as scanout buffers
2019 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2021 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2023 bo_gem->reusable = 0;
2028 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2030 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2032 return bo_gem->reusable;
2036 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2038 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2041 for (i = 0; i < bo_gem->reloc_count; i++) {
2042 if (bo_gem->reloc_target_info[i].bo == target_bo)
2044 if (bo == bo_gem->reloc_target_info[i].bo)
2046 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2054 /** Return true if target_bo is referenced by bo's relocation tree. */
2056 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2058 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2060 if (bo == NULL || target_bo == NULL)
2062 if (target_bo_gem->used_as_reloc_target)
2063 return _drm_intel_gem_bo_references(bo, target_bo);
2068 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2070 unsigned int i = bufmgr_gem->num_buckets;
2072 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2074 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2075 bufmgr_gem->cache_bucket[i].size = size;
2076 bufmgr_gem->num_buckets++;
2080 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2082 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2084 /* OK, so power of two buckets was too wasteful of memory.
2085 * Give 3 other sizes between each power of two, to hopefully
2086 * cover things accurately enough. (The alternative is
2087 * probably to just go for exact matching of sizes, and assume
2088 * that for things like composited window resize the tiled
2089 * width/height alignment and rounding of sizes to pages will
2090 * get us useful cache hit rates anyway)
2092 add_bucket(bufmgr_gem, 4096);
2093 add_bucket(bufmgr_gem, 4096 * 2);
2094 add_bucket(bufmgr_gem, 4096 * 3);
2096 /* Initialize the linked lists for BO reuse cache. */
2097 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2098 add_bucket(bufmgr_gem, size);
2100 add_bucket(bufmgr_gem, size + size * 1 / 4);
2101 add_bucket(bufmgr_gem, size + size * 2 / 4);
2102 add_bucket(bufmgr_gem, size + size * 3 / 4);
2107 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
2108 * and manage buffer objects.
2110 * \param fd File descriptor of the opened DRM device.
2113 drm_intel_bufmgr_gem_init(int fd, int batch_size)
2115 drm_intel_bufmgr_gem *bufmgr_gem;
2116 struct drm_i915_gem_get_aperture aperture;
2117 drm_i915_getparam_t gp;
2121 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
2122 if (bufmgr_gem == NULL)
2125 bufmgr_gem->fd = fd;
2127 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2132 ret = drmIoctl(bufmgr_gem->fd,
2133 DRM_IOCTL_I915_GEM_GET_APERTURE,
2137 bufmgr_gem->gtt_size = aperture.aper_available_size;
2139 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
2141 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2142 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2143 "May lead to reduced performance or incorrect "
2145 (int)bufmgr_gem->gtt_size / 1024);
2148 gp.param = I915_PARAM_CHIPSET_ID;
2149 gp.value = &bufmgr_gem->pci_device;
2150 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2152 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2153 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2156 if (IS_GEN2(bufmgr_gem))
2157 bufmgr_gem->gen = 2;
2158 else if (IS_GEN3(bufmgr_gem))
2159 bufmgr_gem->gen = 3;
2160 else if (IS_GEN4(bufmgr_gem))
2161 bufmgr_gem->gen = 4;
2163 bufmgr_gem->gen = 6;
2167 gp.param = I915_PARAM_HAS_EXECBUF2;
2168 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2172 gp.param = I915_PARAM_HAS_BSD;
2173 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2174 bufmgr_gem->has_bsd = ret == 0;
2176 gp.param = I915_PARAM_HAS_BLT;
2177 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2178 bufmgr_gem->has_blt = ret == 0;
2180 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
2181 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2182 bufmgr_gem->has_relaxed_fencing = ret == 0;
2184 if (bufmgr_gem->gen < 4) {
2185 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2186 gp.value = &bufmgr_gem->available_fences;
2187 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2189 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2191 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2193 bufmgr_gem->available_fences = 0;
2195 /* XXX The kernel reports the total number of fences,
2196 * including any that may be pinned.
2198 * We presume that there will be at least one pinned
2199 * fence for the scanout buffer, but there may be more
2200 * than one scanout and the user may be manually
2201 * pinning buffers. Let's move to execbuffer2 and
2202 * thereby forget the insanity of using fences...
2204 bufmgr_gem->available_fences -= 2;
2205 if (bufmgr_gem->available_fences < 0)
2206 bufmgr_gem->available_fences = 0;
2210 /* Let's go with one relocation per every 2 dwords (but round down a bit
2211 * since a power of two will mean an extra page allocation for the reloc
2214 * Every 4 was too few for the blender benchmark.
2216 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
2218 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2219 bufmgr_gem->bufmgr.bo_alloc_for_render =
2220 drm_intel_gem_bo_alloc_for_render;
2221 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2222 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2223 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2224 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2225 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2226 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2227 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2228 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2229 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2230 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2231 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2232 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2233 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2234 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2235 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2236 /* Use the new one if available */
2238 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2239 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
2241 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2242 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2243 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2244 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2245 bufmgr_gem->bufmgr.debug = 0;
2246 bufmgr_gem->bufmgr.check_aperture_space =
2247 drm_intel_gem_check_aperture_space;
2248 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2249 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
2250 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2251 drm_intel_gem_get_pipe_from_crtc_id;
2252 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2254 DRMINITLISTHEAD(&bufmgr_gem->named);
2255 init_cache_buckets(bufmgr_gem);
2257 return &bufmgr_gem->bufmgr;
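/*
 * Illustrative sketch (not part of the original file): bringing up the GEM
 * bufmgr.  The batch size passed to drm_intel_bufmgr_gem_init() only sizes
 * the relocation arrays (see max_relocs above); 16kB is a typical value.
 * drmOpen() comes from xf86drm.h, which the full file includes; error
 * handling is abbreviated.
 */
static drm_intel_bufmgr *
example_bufmgr_init(void)
{
	drm_intel_bufmgr *bufmgr;
	int fd = drmOpen("i915", NULL);

	if (fd < 0)
		return NULL;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
	if (bufmgr == NULL)
		return NULL;

	/* Opt in to the BO cache and to fence-aware relocations. */
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);

	return bufmgr;
}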