1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
65 #define DBG(...) do { \
66 if (bufmgr_gem->bufmgr.debug) \
67 fprintf(stderr, __VA_ARGS__); \
70 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
72 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
74 struct drm_intel_gem_bo_bucket {
79 typedef struct _drm_intel_bufmgr_gem {
80 drm_intel_bufmgr bufmgr;
88 struct drm_i915_gem_exec_object *exec_objects;
89 struct drm_i915_gem_exec_object2 *exec2_objects;
90 drm_intel_bo **exec_bos;
94 /** Array of lists of cached gem objects, bucketed by size */
95 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
102 int available_fences;
105 unsigned int has_bsd : 1;
106 unsigned int has_blt : 1;
107 unsigned int has_relaxed_fencing : 1;
108 unsigned int bo_reuse : 1;
110 } drm_intel_bufmgr_gem;
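/* Flag stored in reloc_target_info[].flags: the relocation target needs a
 * hardware fence register when it is bound (pre-gen4 tiled surfaces). */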
112 #define DRM_INTEL_RELOC_FENCE (1<<0)
114 typedef struct _drm_intel_reloc_target_info {
117 } drm_intel_reloc_target;
119 struct _drm_intel_bo_gem {
127 * Kernel-assigned global name for this object
129 unsigned int global_name;
130 drmMMListHead name_list;
133 * Index of the buffer within the validation list while preparing a
134 * batchbuffer execution.
139 * Current tiling mode
141 uint32_t tiling_mode;
142 uint32_t swizzle_mode;
143 unsigned long stride;
147 /** Array passed to the DRM containing relocation information. */
148 struct drm_i915_gem_relocation_entry *relocs;
150 * Array of info structs corresponding to relocs[i].target_handle etc
152 drm_intel_reloc_target *reloc_target_info;
153 /** Number of entries in relocs */
155 /** Mapped address for the buffer, saved across map/unmap cycles */
157 /** GTT virtual address for the buffer, saved across map/unmap cycles */
164 * Boolean of whether this BO and its children have been included in
165 * the current drm_intel_bufmgr_check_aperture_space() total.
167 bool included_in_check_aperture;
170 * Boolean of whether this buffer has been used as a relocation
171 * target and had its size accounted for, and thus can't have any
172 * further relocations added to it.
174 bool used_as_reloc_target;
177 * Boolean of whether we have encountered an error whilst building the relocation tree.
182 * Boolean of whether this buffer can be re-used
187 * Size in bytes of this buffer and its relocation descendants.
189 * Used to avoid costly tree walking in
190 * drm_intel_bufmgr_check_aperture in the common case.
195 * Number of potential fence registers required by this buffer and its
198 int reloc_tree_fences;
200 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
201 bool mapped_cpu_write;
205 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
208 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
211 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
212 uint32_t * swizzle_mode);
215 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
216 uint32_t tiling_mode,
219 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
222 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
224 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
227 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
228 uint32_t *tiling_mode)
230 unsigned long min_size, max_size;
233 if (*tiling_mode == I915_TILING_NONE)
236 /* 965+ just need multiples of page size for tiling */
237 if (bufmgr_gem->gen >= 4)
238 return ROUND_UP_TO(size, 4096);
240 /* Older chips need powers of two, of at least 512k or 1M */
241 if (bufmgr_gem->gen == 3) {
242 min_size = 1024*1024;
243 max_size = 128*1024*1024;
246 max_size = 64*1024*1024;
249 if (size > max_size) {
250 *tiling_mode = I915_TILING_NONE;
254 /* Do we need to allocate every page for the fence? */
255 if (bufmgr_gem->has_relaxed_fencing)
256 return ROUND_UP_TO(size, 4096);
258 for (i = min_size; i < size; i <<= 1)
265 * Round a given pitch up to the minimum required for X tiling on a
266 * given chip. We use 512 as the minimum to allow for a later tiling
270 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
271 unsigned long pitch, uint32_t *tiling_mode)
273 unsigned long tile_width;
276 /* If untiled, then just align it so that we can do rendering
277 * to it with the 3D engine.
279 if (*tiling_mode == I915_TILING_NONE)
280 return ALIGN(pitch, 64);
282 if (*tiling_mode == I915_TILING_X
283 || (IS_915(bufmgr_gem) && *tiling_mode == I915_TILING_Y))
288 /* 965 is flexible */
289 if (bufmgr_gem->gen >= 4)
290 return ROUND_UP_TO(pitch, tile_width);
292 /* The older hardware has a maximum pitch of 8192 with tiled
293 * surfaces, so fall back to untiled if it's too large.
296 *tiling_mode = I915_TILING_NONE;
297 return ALIGN(pitch, 64);
300 /* Pre-965 needs power of two tile width */
301 for (i = tile_width; i < pitch; i <<= 1)
307 static struct drm_intel_gem_bo_bucket *
308 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
313 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
314 struct drm_intel_gem_bo_bucket *bucket =
315 &bufmgr_gem->cache_bucket[i];
316 if (bucket->size >= size) {
325 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
329 for (i = 0; i < bufmgr_gem->exec_count; i++) {
330 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
331 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
333 if (bo_gem->relocs == NULL) {
334 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
339 for (j = 0; j < bo_gem->reloc_count; j++) {
340 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
341 drm_intel_bo_gem *target_gem =
342 (drm_intel_bo_gem *) target_bo;
344 DBG("%2d: %d (%s)@0x%08llx -> "
345 "%d (%s)@0x%08lx + 0x%08x\n",
347 bo_gem->gem_handle, bo_gem->name,
348 (unsigned long long)bo_gem->relocs[j].offset,
349 target_gem->gem_handle,
352 bo_gem->relocs[j].delta);
358 drm_intel_gem_bo_reference(drm_intel_bo *bo)
360 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
362 atomic_inc(&bo_gem->refcount);
366 * Adds the given buffer to the list of buffers to be validated (moved into the
367 * appropriate memory type) with the next batch submission.
369 * If a buffer is validated multiple times in a batch submission, it ends up
370 * with the intersection of the memory type flags and the union of the
374 drm_intel_add_validate_buffer(drm_intel_bo *bo)
376 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
377 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
380 if (bo_gem->validate_index != -1)
383 /* Extend the array of validation entries as necessary. */
384 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
385 int new_size = bufmgr_gem->exec_size * 2;
390 bufmgr_gem->exec_objects =
391 realloc(bufmgr_gem->exec_objects,
392 sizeof(*bufmgr_gem->exec_objects) * new_size);
393 bufmgr_gem->exec_bos =
394 realloc(bufmgr_gem->exec_bos,
395 sizeof(*bufmgr_gem->exec_bos) * new_size);
396 bufmgr_gem->exec_size = new_size;
399 index = bufmgr_gem->exec_count;
400 bo_gem->validate_index = index;
401 /* Fill in array entry */
402 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
403 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
404 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
405 bufmgr_gem->exec_objects[index].alignment = 0;
406 bufmgr_gem->exec_objects[index].offset = 0;
407 bufmgr_gem->exec_bos[index] = bo;
408 bufmgr_gem->exec_count++;
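/* execbuffer2 variant of the above: also records whether the object needs a
 * fence register via EXEC_OBJECT_NEEDS_FENCE. */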
412 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
414 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
415 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
418 if (bo_gem->validate_index != -1) {
420 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
421 EXEC_OBJECT_NEEDS_FENCE;
425 /* Extend the array of validation entries as necessary. */
426 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
427 int new_size = bufmgr_gem->exec_size * 2;
432 bufmgr_gem->exec2_objects =
433 realloc(bufmgr_gem->exec2_objects,
434 sizeof(*bufmgr_gem->exec2_objects) * new_size);
435 bufmgr_gem->exec_bos =
436 realloc(bufmgr_gem->exec_bos,
437 sizeof(*bufmgr_gem->exec_bos) * new_size);
438 bufmgr_gem->exec_size = new_size;
441 index = bufmgr_gem->exec_count;
442 bo_gem->validate_index = index;
443 /* Fill in array entry */
444 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
445 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
446 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
447 bufmgr_gem->exec2_objects[index].alignment = 0;
448 bufmgr_gem->exec2_objects[index].offset = 0;
449 bufmgr_gem->exec_bos[index] = bo;
450 bufmgr_gem->exec2_objects[index].flags = 0;
451 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
452 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
454 bufmgr_gem->exec2_objects[index].flags |=
455 EXEC_OBJECT_NEEDS_FENCE;
457 bufmgr_gem->exec_count++;
460 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
464 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
465 drm_intel_bo_gem *bo_gem)
469 assert(!bo_gem->used_as_reloc_target);
471 /* The older chipsets are far less flexible in terms of tiling,
472 * and require tiled buffers to be size-aligned in the aperture.
473 * This means that in the worst possible case we will need a hole
474 * twice as large as the object in order for it to fit into the
475 * aperture. Optimal packing is for wimps.
477 size = bo_gem->bo.size;
478 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
481 if (bufmgr_gem->has_relaxed_fencing) {
482 if (bufmgr_gem->gen == 3)
483 min_size = 1024*1024;
487 while (min_size < size)
492 /* Account for worst-case alignment. */
496 bo_gem->reloc_tree_size = size;
500 drm_intel_setup_reloc_list(drm_intel_bo *bo)
502 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
503 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
504 unsigned int max_relocs = bufmgr_gem->max_relocs;
506 if (bo->size / 4 < max_relocs)
507 max_relocs = bo->size / 4;
509 bo_gem->relocs = malloc(max_relocs *
510 sizeof(struct drm_i915_gem_relocation_entry));
511 bo_gem->reloc_target_info = malloc(max_relocs *
512 sizeof(drm_intel_reloc_target));
513 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
514 bo_gem->has_error = true;
516 free (bo_gem->relocs);
517 bo_gem->relocs = NULL;
519 free (bo_gem->reloc_target_info);
520 bo_gem->reloc_target_info = NULL;
529 drm_intel_gem_bo_busy(drm_intel_bo *bo)
531 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
532 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
533 struct drm_i915_gem_busy busy;
536 memset(&busy, 0, sizeof(busy));
537 busy.handle = bo_gem->gem_handle;
539 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
541 return (ret == 0 && busy.busy);
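/* Tell the kernel whether the backing pages are needed (I915_MADV_WILLNEED)
 * or may be discarded under memory pressure (I915_MADV_DONTNEED); returns
 * whether the pages are still resident. */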
545 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
546 drm_intel_bo_gem *bo_gem, int state)
548 struct drm_i915_gem_madvise madv;
550 madv.handle = bo_gem->gem_handle;
553 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
555 return madv.retained;
559 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
561 return drm_intel_gem_bo_madvise_internal
562 ((drm_intel_bufmgr_gem *) bo->bufmgr,
563 (drm_intel_bo_gem *) bo,
567 /* drop the oldest entries that have been purged by the kernel */
569 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
570 struct drm_intel_gem_bo_bucket *bucket)
572 while (!DRMLISTEMPTY(&bucket->head)) {
573 drm_intel_bo_gem *bo_gem;
575 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
576 bucket->head.next, head);
577 if (drm_intel_gem_bo_madvise_internal
578 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
581 DRMLISTDEL(&bo_gem->head);
582 drm_intel_gem_bo_free(&bo_gem->bo);
586 static drm_intel_bo *
587 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
591 uint32_t tiling_mode,
592 unsigned long stride)
594 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
595 drm_intel_bo_gem *bo_gem;
596 unsigned int page_size = getpagesize();
598 struct drm_intel_gem_bo_bucket *bucket;
599 bool alloc_from_cache;
600 unsigned long bo_size;
601 bool for_render = false;
603 if (flags & BO_ALLOC_FOR_RENDER)
606 /* Round the allocated size up to a power of two number of pages. */
607 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
609 /* If we don't have caching at this size, don't actually round the
612 if (bucket == NULL) {
614 if (bo_size < page_size)
617 bo_size = bucket->size;
620 pthread_mutex_lock(&bufmgr_gem->lock);
621 /* Get a buffer out of the cache if available */
623 alloc_from_cache = false;
624 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
626 /* Allocate new render-target BOs from the tail (MRU)
627 * of the list, as it will likely be hot in the GPU
628 * cache and in the aperture for us.
630 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
631 bucket->head.prev, head);
632 DRMLISTDEL(&bo_gem->head);
633 alloc_from_cache = true;
635 /* For non-render-target BOs (where we're probably
636 * going to map it first thing in order to fill it
637 * with data), check if the last BO in the cache is
638 * unbusy, and only reuse in that case. Otherwise,
639 * allocating a new buffer is probably faster than
640 * waiting for the GPU to finish.
642 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
643 bucket->head.next, head);
644 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
645 alloc_from_cache = true;
646 DRMLISTDEL(&bo_gem->head);
650 if (alloc_from_cache) {
651 if (!drm_intel_gem_bo_madvise_internal
652 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
653 drm_intel_gem_bo_free(&bo_gem->bo);
654 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
659 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
662 drm_intel_gem_bo_free(&bo_gem->bo);
667 pthread_mutex_unlock(&bufmgr_gem->lock);
669 if (!alloc_from_cache) {
670 struct drm_i915_gem_create create;
672 bo_gem = calloc(1, sizeof(*bo_gem));
676 bo_gem->bo.size = bo_size;
677 memset(&create, 0, sizeof(create));
678 create.size = bo_size;
680 ret = drmIoctl(bufmgr_gem->fd,
681 DRM_IOCTL_I915_GEM_CREATE,
683 bo_gem->gem_handle = create.handle;
684 bo_gem->bo.handle = bo_gem->gem_handle;
689 bo_gem->bo.bufmgr = bufmgr;
691 bo_gem->tiling_mode = I915_TILING_NONE;
692 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
695 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
698 drm_intel_gem_bo_free(&bo_gem->bo);
702 DRMINITLISTHEAD(&bo_gem->name_list);
706 atomic_set(&bo_gem->refcount, 1);
707 bo_gem->validate_index = -1;
708 bo_gem->reloc_tree_fences = 0;
709 bo_gem->used_as_reloc_target = false;
710 bo_gem->has_error = false;
711 bo_gem->reusable = true;
713 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
715 DBG("bo_create: buf %d (%s) %ldb\n",
716 bo_gem->gem_handle, bo_gem->name, size);
721 static drm_intel_bo *
722 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
725 unsigned int alignment)
727 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
729 I915_TILING_NONE, 0);
732 static drm_intel_bo *
733 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
736 unsigned int alignment)
738 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
739 I915_TILING_NONE, 0);
742 static drm_intel_bo *
743 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
744 int x, int y, int cpp, uint32_t *tiling_mode,
745 unsigned long *pitch, unsigned long flags)
747 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
748 unsigned long size, stride;
752 unsigned long aligned_y, height_alignment;
754 tiling = *tiling_mode;
756 /* If we're tiled, our allocations are in 8 or 32-row blocks,
757 * so failure to align our height means that we won't allocate
760 * If we're untiled, we still have to align to 2 rows high
761 * because the data port accesses 2x2 blocks even if the
762 * bottom row isn't to be rendered, so failure to align means
763 * we could walk off the end of the GTT and fault. This is
764 * documented on 965, and may be the case on older chipsets
765 * too so we try to be careful.
768 height_alignment = 2;
770 if (IS_GEN2(bufmgr_gem) && tiling != I915_TILING_NONE)
771 height_alignment = 16;
772 else if (tiling == I915_TILING_X
773 || (IS_915(bufmgr_gem) && tiling == I915_TILING_Y))
774 height_alignment = 8;
775 else if (tiling == I915_TILING_Y)
776 height_alignment = 32;
777 aligned_y = ALIGN(y, height_alignment);
780 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
781 size = stride * aligned_y;
782 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
783 } while (*tiling_mode != tiling);
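/* The pitch/size helpers may demote *tiling_mode (for example, oversized
 * surfaces fall back to I915_TILING_NONE), which changes the alignment
 * requirements, so recompute until the mode is stable. */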
786 if (tiling == I915_TILING_NONE)
789 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
794 * Returns a drm_intel_bo wrapping the given buffer object handle.
796 * This can be used when one application needs to pass a buffer object
800 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
804 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
805 drm_intel_bo_gem *bo_gem;
807 struct drm_gem_open open_arg;
808 struct drm_i915_gem_get_tiling get_tiling;
811 /* At the moment most applications only have a few named bos.
812 * For instance, in a DRI client only the render buffers passed
813 * between X and the client are named. And since X returns the
814 * alternating names for the front/back buffer a linear search
815 * provides a sufficiently fast match.
817 for (list = bufmgr_gem->named.next;
818 list != &bufmgr_gem->named;
820 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
821 if (bo_gem->global_name == handle) {
822 drm_intel_gem_bo_reference(&bo_gem->bo);
827 bo_gem = calloc(1, sizeof(*bo_gem));
831 memset(&open_arg, 0, sizeof(open_arg));
832 open_arg.name = handle;
833 ret = drmIoctl(bufmgr_gem->fd,
837 DBG("Couldn't reference %s handle 0x%08x: %s\n",
838 name, handle, strerror(errno));
842 bo_gem->bo.size = open_arg.size;
843 bo_gem->bo.offset = 0;
844 bo_gem->bo.virtual = NULL;
845 bo_gem->bo.bufmgr = bufmgr;
847 atomic_set(&bo_gem->refcount, 1);
848 bo_gem->validate_index = -1;
849 bo_gem->gem_handle = open_arg.handle;
850 bo_gem->bo.handle = open_arg.handle;
851 bo_gem->global_name = handle;
852 bo_gem->reusable = false;
854 memset(&get_tiling, 0, sizeof(get_tiling));
855 get_tiling.handle = bo_gem->gem_handle;
856 ret = drmIoctl(bufmgr_gem->fd,
857 DRM_IOCTL_I915_GEM_GET_TILING,
860 drm_intel_gem_bo_unreference(&bo_gem->bo);
863 bo_gem->tiling_mode = get_tiling.tiling_mode;
864 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
865 /* XXX stride is unknown */
866 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
868 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
869 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
875 drm_intel_gem_bo_free(drm_intel_bo *bo)
877 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
878 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
879 struct drm_gem_close close;
882 if (bo_gem->mem_virtual)
883 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
884 if (bo_gem->gtt_virtual)
885 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
887 /* Close this object */
888 memset(&close, 0, sizeof(close));
889 close.handle = bo_gem->gem_handle;
890 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
892 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
893 bo_gem->gem_handle, bo_gem->name, strerror(errno));
898 /** Frees all cached buffers significantly older than @time. */
900 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
904 if (bufmgr_gem->time == time)
907 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
908 struct drm_intel_gem_bo_bucket *bucket =
909 &bufmgr_gem->cache_bucket[i];
911 while (!DRMLISTEMPTY(&bucket->head)) {
912 drm_intel_bo_gem *bo_gem;
914 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
915 bucket->head.next, head);
916 if (time - bo_gem->free_time <= 1)
919 DRMLISTDEL(&bo_gem->head);
921 drm_intel_gem_bo_free(&bo_gem->bo);
925 bufmgr_gem->time = time;
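/* Called when the last reference is dropped: releases the relocation
 * bookkeeping and either returns the BO to its size bucket for reuse or
 * frees it outright. */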
929 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
931 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
932 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
933 struct drm_intel_gem_bo_bucket *bucket;
936 /* Unreference all the target buffers */
937 for (i = 0; i < bo_gem->reloc_count; i++) {
938 if (bo_gem->reloc_target_info[i].bo != bo) {
939 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
940 reloc_target_info[i].bo,
944 bo_gem->reloc_count = 0;
945 bo_gem->used_as_reloc_target = false;
947 DBG("bo_unreference final: %d (%s)\n",
948 bo_gem->gem_handle, bo_gem->name);
950 /* release memory associated with this object */
951 if (bo_gem->reloc_target_info) {
952 free(bo_gem->reloc_target_info);
953 bo_gem->reloc_target_info = NULL;
955 if (bo_gem->relocs) {
956 free(bo_gem->relocs);
957 bo_gem->relocs = NULL;
960 DRMLISTDEL(&bo_gem->name_list);
962 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
963 /* Put the buffer into our internal cache for reuse if we can. */
964 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
965 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
966 I915_MADV_DONTNEED)) {
967 bo_gem->free_time = time;
970 bo_gem->validate_index = -1;
972 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
974 drm_intel_gem_bo_free(bo);
978 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
981 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
983 assert(atomic_read(&bo_gem->refcount) > 0);
984 if (atomic_dec_and_test(&bo_gem->refcount))
985 drm_intel_gem_bo_unreference_final(bo, time);
988 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
990 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
992 assert(atomic_read(&bo_gem->refcount) > 0);
993 if (atomic_dec_and_test(&bo_gem->refcount)) {
994 drm_intel_bufmgr_gem *bufmgr_gem =
995 (drm_intel_bufmgr_gem *) bo->bufmgr;
996 struct timespec time;
998 clock_gettime(CLOCK_MONOTONIC, &time);
1000 pthread_mutex_lock(&bufmgr_gem->lock);
1001 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1002 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1003 pthread_mutex_unlock(&bufmgr_gem->lock);
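/* Map the buffer with the CPU (GEM_MMAP) and move it to the CPU domain;
 * write_enable requests the CPU write domain as well. */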
1007 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1009 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1010 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1011 struct drm_i915_gem_set_domain set_domain;
1014 pthread_mutex_lock(&bufmgr_gem->lock);
1016 if (!bo_gem->mem_virtual) {
1017 struct drm_i915_gem_mmap mmap_arg;
1019 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
1021 memset(&mmap_arg, 0, sizeof(mmap_arg));
1022 mmap_arg.handle = bo_gem->gem_handle;
1023 mmap_arg.offset = 0;
1024 mmap_arg.size = bo->size;
1025 ret = drmIoctl(bufmgr_gem->fd,
1026 DRM_IOCTL_I915_GEM_MMAP,
1030 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1031 __FILE__, __LINE__, bo_gem->gem_handle,
1032 bo_gem->name, strerror(errno));
1033 pthread_mutex_unlock(&bufmgr_gem->lock);
1036 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1038 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1039 bo_gem->mem_virtual);
1040 bo->virtual = bo_gem->mem_virtual;
1042 set_domain.handle = bo_gem->gem_handle;
1043 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1045 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1047 set_domain.write_domain = 0;
1048 ret = drmIoctl(bufmgr_gem->fd,
1049 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1052 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1053 __FILE__, __LINE__, bo_gem->gem_handle,
1058 bo_gem->mapped_cpu_write = true;
1060 pthread_mutex_unlock(&bufmgr_gem->lock);
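/* The fake offsets returned by MMAP_GTT may not fit in a 32-bit off_t, so on
 * Android we wrap the raw __mmap2 syscall, which takes the offset in pages. */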
1067 extern void* __mmap2(void*, size_t, int, int, int, size_t);
1069 #define MMAP2_SHIFT 12
1070 static void* android_mmap2(void *addr, size_t size, int prot, int flags, int fd, unsigned long long offset)
1072 if ( offset & ((1UL << MMAP2_SHIFT)-1) ) {
1077 return __mmap2(addr, size, prot, flags, fd, (size_t)(offset >> MMAP2_SHIFT));
1080 #define mmap(addr, size, prot, flags, fd, offset) android_mmap2(addr, size, prot, flags, fd, offset)
1082 #endif /* ANDROID */
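/** Map the buffer through the GTT aperture: fetch the fake mmap offset with
 * the MMAP_GTT ioctl, mmap it, and move the object to the GTT domain so that
 * CPU caches are flushed. */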
1084 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1086 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1087 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1088 struct drm_i915_gem_set_domain set_domain;
1091 pthread_mutex_lock(&bufmgr_gem->lock);
1093 /* Get a mapping of the buffer if we haven't before. */
1094 if (bo_gem->gtt_virtual == NULL) {
1095 struct drm_i915_gem_mmap_gtt mmap_arg;
1097 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
1100 memset(&mmap_arg, 0, sizeof(mmap_arg));
1101 mmap_arg.handle = bo_gem->gem_handle;
1103 /* Get the fake offset back... */
1104 ret = drmIoctl(bufmgr_gem->fd,
1105 DRM_IOCTL_I915_GEM_MMAP_GTT,
1109 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1111 bo_gem->gem_handle, bo_gem->name,
1113 pthread_mutex_unlock(&bufmgr_gem->lock);
1118 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1119 MAP_SHARED, bufmgr_gem->fd,
1121 if (bo_gem->gtt_virtual == MAP_FAILED) {
1122 bo_gem->gtt_virtual = NULL;
1124 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1126 bo_gem->gem_handle, bo_gem->name,
1128 pthread_mutex_unlock(&bufmgr_gem->lock);
1133 bo->virtual = bo_gem->gtt_virtual;
1135 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1136 bo_gem->gtt_virtual);
1138 /* Now move it to the GTT domain so that the CPU caches are flushed */
1139 set_domain.handle = bo_gem->gem_handle;
1140 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1141 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1142 ret = drmIoctl(bufmgr_gem->fd,
1143 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1146 DBG("%s:%d: Error setting domain %d: %s\n",
1147 __FILE__, __LINE__, bo_gem->gem_handle,
1151 pthread_mutex_unlock(&bufmgr_gem->lock);
1156 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1158 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1159 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1160 struct drm_i915_gem_sw_finish sw_finish;
1166 pthread_mutex_lock(&bufmgr_gem->lock);
1168 if (bo_gem->mapped_cpu_write) {
1169 /* Cause a flush to happen if the buffer's pinned for
1170 * scanout, so the results show up in a timely manner.
1171 * Unlike GTT set domains, this only does work if the
1172 * buffer should be scanout-related.
1174 sw_finish.handle = bo_gem->gem_handle;
1175 ret = drmIoctl(bufmgr_gem->fd,
1176 DRM_IOCTL_I915_GEM_SW_FINISH,
1178 ret = ret == -1 ? -errno : 0;
1180 bo_gem->mapped_cpu_write = false;
1184 pthread_mutex_unlock(&bufmgr_gem->lock);
1189 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1191 return drm_intel_gem_bo_unmap(bo);
1195 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1196 unsigned long size, const void *data)
1198 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1199 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1200 struct drm_i915_gem_pwrite pwrite;
1203 memset(&pwrite, 0, sizeof(pwrite));
1204 pwrite.handle = bo_gem->gem_handle;
1205 pwrite.offset = offset;
1207 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1208 ret = drmIoctl(bufmgr_gem->fd,
1209 DRM_IOCTL_I915_GEM_PWRITE,
1213 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1214 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1215 (int)size, strerror(errno));
1222 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1224 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1225 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1228 get_pipe_from_crtc_id.crtc_id = crtc_id;
1229 ret = drmIoctl(bufmgr_gem->fd,
1230 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1231 &get_pipe_from_crtc_id);
1233 /* We return -1 here to signal that we don't
1234 * know which pipe is associated with this crtc.
1235 * This lets the caller know that this information
1236 * isn't available; using the wrong pipe for
1237 * vblank waiting can cause the chipset to lock up
1242 return get_pipe_from_crtc_id.pipe;
1246 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1247 unsigned long size, void *data)
1249 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1250 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1251 struct drm_i915_gem_pread pread;
1254 memset(&pread, 0, sizeof(pread));
1255 pread.handle = bo_gem->gem_handle;
1256 pread.offset = offset;
1258 pread.data_ptr = (uint64_t) (uintptr_t) data;
1259 ret = drmIoctl(bufmgr_gem->fd,
1260 DRM_IOCTL_I915_GEM_PREAD,
1264 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1265 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1266 (int)size, strerror(errno));
1272 /** Waits for all GPU rendering with the object to have completed. */
1274 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1276 drm_intel_gem_bo_start_gtt_access(bo, 1);
1280 * Sets the object to the GTT read and possibly write domain, used by the X
1281 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1283 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1284 * can do tiled pixmaps this way.
1287 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1289 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1290 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1291 struct drm_i915_gem_set_domain set_domain;
1294 set_domain.handle = bo_gem->gem_handle;
1295 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1296 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1297 ret = drmIoctl(bufmgr_gem->fd,
1298 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1301 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1302 __FILE__, __LINE__, bo_gem->gem_handle,
1303 set_domain.read_domains, set_domain.write_domain,
1309 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1311 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1314 free(bufmgr_gem->exec2_objects);
1315 free(bufmgr_gem->exec_objects);
1316 free(bufmgr_gem->exec_bos);
1318 pthread_mutex_destroy(&bufmgr_gem->lock);
1320 /* Free any cached buffer objects we were going to reuse */
1321 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1322 struct drm_intel_gem_bo_bucket *bucket =
1323 &bufmgr_gem->cache_bucket[i];
1324 drm_intel_bo_gem *bo_gem;
1326 while (!DRMLISTEMPTY(&bucket->head)) {
1327 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1328 bucket->head.next, head);
1329 DRMLISTDEL(&bo_gem->head);
1331 drm_intel_gem_bo_free(&bo_gem->bo);
1339 * Adds the target buffer to the validation list and adds the relocation
1340 * to the reloc_buffer's relocation list.
1342 * The relocation entry at the given offset must already contain the
1343 * precomputed relocation value, because the kernel will optimize out
1344 * the relocation entry write when the buffer hasn't moved from the
1345 * last known offset in target_bo.
1348 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1349 drm_intel_bo *target_bo, uint32_t target_offset,
1350 uint32_t read_domains, uint32_t write_domain,
1353 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1354 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1355 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1356 bool fenced_command;
1358 if (bo_gem->has_error)
1361 if (target_bo_gem->has_error) {
1362 bo_gem->has_error = true;
1366 /* We never use HW fences for rendering on 965+ */
1367 if (bufmgr_gem->gen >= 4)
1370 fenced_command = need_fence;
1371 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1374 /* Create a new relocation list if needed */
1375 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1378 /* Check overflow */
1379 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1382 assert(offset <= bo->size - 4);
1383 assert((write_domain & (write_domain - 1)) == 0);
1385 /* Make sure that we're not adding a reloc to something whose size has
1386 * already been accounted for.
1388 assert(!bo_gem->used_as_reloc_target);
1389 if (target_bo_gem != bo_gem) {
1390 target_bo_gem->used_as_reloc_target = true;
1391 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1393 /* An object needing a fence is a tiled buffer, so it won't have
1394 * relocs to other buffers.
1397 target_bo_gem->reloc_tree_fences = 1;
1398 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1400 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1401 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1402 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1403 target_bo_gem->gem_handle;
1404 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1405 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1406 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1408 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1409 if (target_bo != bo)
1410 drm_intel_gem_bo_reference(target_bo);
1412 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1413 DRM_INTEL_RELOC_FENCE;
1415 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1417 bo_gem->reloc_count++;
1423 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1424 drm_intel_bo *target_bo, uint32_t target_offset,
1425 uint32_t read_domains, uint32_t write_domain)
1427 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1429 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1430 read_domains, write_domain,
1431 !bufmgr_gem->fenced_relocs);
1435 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1436 drm_intel_bo *target_bo,
1437 uint32_t target_offset,
1438 uint32_t read_domains, uint32_t write_domain)
1440 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1441 read_domains, write_domain, true);
1445 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1447 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1449 return bo_gem->reloc_count;
1453 * Removes existing relocation entries in the BO after "start".
1455 * This allows a user to avoid a two-step process for state setup with
1456 * counting up all the buffer objects and doing a
1457 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1458 * relocations for the state setup. Instead, save the state of the
1459 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1460 * state, and then check if it still fits in the aperture.
1462 * Any further drm_intel_bufmgr_check_aperture_space() queries
1463 * involving this buffer in the tree are undefined after this call.
1466 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1468 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1470 struct timespec time;
1472 clock_gettime(CLOCK_MONOTONIC, &time);
1474 assert(bo_gem->reloc_count >= start);
1475 /* Unreference the cleared target buffers */
1476 for (i = start; i < bo_gem->reloc_count; i++) {
1477 if (bo_gem->reloc_target_info[i].bo != bo) {
1478 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1479 reloc_target_info[i].bo,
1483 bo_gem->reloc_count = start;
1487 * Walk the tree of relocations rooted at BO and accumulate the list of
1488 * validations to be performed and update the relocation buffers with
1489 * index values into the validation list.
1492 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1494 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1497 if (bo_gem->relocs == NULL)
1500 for (i = 0; i < bo_gem->reloc_count; i++) {
1501 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1503 if (target_bo == bo)
1506 /* Continue walking the tree depth-first. */
1507 drm_intel_gem_bo_process_reloc(target_bo);
1509 /* Add the target to the validate list */
1510 drm_intel_add_validate_buffer(target_bo);
1515 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1517 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1520 if (bo_gem->relocs == NULL)
1523 for (i = 0; i < bo_gem->reloc_count; i++) {
1524 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1527 if (target_bo == bo)
1530 /* Continue walking the tree depth-first. */
1531 drm_intel_gem_bo_process_reloc2(target_bo);
1533 need_fence = (bo_gem->reloc_target_info[i].flags &
1534 DRM_INTEL_RELOC_FENCE);
1536 /* Add the target to the validate list */
1537 drm_intel_add_validate_buffer2(target_bo, need_fence);
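/* After execbuffer returns, the kernel may have relocated objects; copy the
 * new offsets back into the userspace drm_intel_bo structs so future
 * presumed offsets are correct. */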
1543 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1547 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1548 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1549 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1551 /* Update the buffer offset */
1552 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1553 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1554 bo_gem->gem_handle, bo_gem->name, bo->offset,
1555 (unsigned long long)bufmgr_gem->exec_objects[i].
1557 bo->offset = bufmgr_gem->exec_objects[i].offset;
1563 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1567 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1568 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1569 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1571 /* Update the buffer offset */
1572 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1573 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1574 bo_gem->gem_handle, bo_gem->name, bo->offset,
1575 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1576 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1582 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1583 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1585 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1586 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1587 struct drm_i915_gem_execbuffer execbuf;
1590 if (bo_gem->has_error)
1593 pthread_mutex_lock(&bufmgr_gem->lock);
1594 /* Update indices and set up the validate list. */
1595 drm_intel_gem_bo_process_reloc(bo);
1597 /* Add the batch buffer to the validation list. There are no
1598 * relocations pointing to it.
1600 drm_intel_add_validate_buffer(bo);
1602 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1603 execbuf.buffer_count = bufmgr_gem->exec_count;
1604 execbuf.batch_start_offset = 0;
1605 execbuf.batch_len = used;
1606 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1607 execbuf.num_cliprects = num_cliprects;
1611 ret = drmIoctl(bufmgr_gem->fd,
1612 DRM_IOCTL_I915_GEM_EXECBUFFER,
1616 if (errno == ENOSPC) {
1617 DBG("Execbuffer fails to pin. "
1618 "Estimate: %u. Actual: %u. Available: %u\n",
1619 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1622 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1625 (unsigned int)bufmgr_gem->gtt_size);
1628 drm_intel_update_buffer_offsets(bufmgr_gem);
1630 if (bufmgr_gem->bufmgr.debug)
1631 drm_intel_gem_dump_validation_list(bufmgr_gem);
1633 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1634 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1635 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1637 /* Disconnect the buffer from the validate list */
1638 bo_gem->validate_index = -1;
1639 bufmgr_gem->exec_bos[i] = NULL;
1641 bufmgr_gem->exec_count = 0;
1642 pthread_mutex_unlock(&bufmgr_gem->lock);
1648 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
1649 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
1652 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1653 struct drm_i915_gem_execbuffer2 execbuf;
1656 switch (flags & 0x7) {
1660 if (!bufmgr_gem->has_blt)
1664 if (!bufmgr_gem->has_bsd)
1667 case I915_EXEC_RENDER:
1668 case I915_EXEC_DEFAULT:
1672 pthread_mutex_lock(&bufmgr_gem->lock);
1673 /* Update indices and set up the validate list. */
1674 drm_intel_gem_bo_process_reloc2(bo);
1676 /* Add the batch buffer to the validation list. There are no relocations
1679 drm_intel_add_validate_buffer2(bo, 0);
1681 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1682 execbuf.buffer_count = bufmgr_gem->exec_count;
1683 execbuf.batch_start_offset = 0;
1684 execbuf.batch_len = used;
1685 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1686 execbuf.num_cliprects = num_cliprects;
1689 execbuf.flags = flags;
1693 ret = drmIoctl(bufmgr_gem->fd,
1694 DRM_IOCTL_I915_GEM_EXECBUFFER2,
1698 if (ret == -ENOSPC) {
1699 DBG("Execbuffer fails to pin. "
1700 "Estimate: %u. Actual: %u. Available: %u\n",
1701 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1702 bufmgr_gem->exec_count),
1703 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1704 bufmgr_gem->exec_count),
1705 (unsigned int) bufmgr_gem->gtt_size);
1708 drm_intel_update_buffer_offsets2(bufmgr_gem);
1710 if (bufmgr_gem->bufmgr.debug)
1711 drm_intel_gem_dump_validation_list(bufmgr_gem);
1713 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1714 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1715 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1717 /* Disconnect the buffer from the validate list */
1718 bo_gem->validate_index = -1;
1719 bufmgr_gem->exec_bos[i] = NULL;
1721 bufmgr_gem->exec_count = 0;
1722 pthread_mutex_unlock(&bufmgr_gem->lock);
1728 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1729 drm_clip_rect_t *cliprects, int num_cliprects,
1732 return drm_intel_gem_bo_mrb_exec2(bo, used,
1733 cliprects, num_cliprects, DR4,
1738 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1740 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1741 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1742 struct drm_i915_gem_pin pin;
1745 memset(&pin, 0, sizeof(pin));
1746 pin.handle = bo_gem->gem_handle;
1747 pin.alignment = alignment;
1749 ret = drmIoctl(bufmgr_gem->fd,
1750 DRM_IOCTL_I915_GEM_PIN,
1755 bo->offset = pin.offset;
1760 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1762 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1763 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1764 struct drm_i915_gem_unpin unpin;
1767 memset(&unpin, 0, sizeof(unpin));
1768 unpin.handle = bo_gem->gem_handle;
1770 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1778 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
1779 uint32_t tiling_mode,
1782 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1783 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1784 struct drm_i915_gem_set_tiling set_tiling;
1787 if (bo_gem->global_name == 0 &&
1788 tiling_mode == bo_gem->tiling_mode &&
1789 stride == bo_gem->stride)
1792 memset(&set_tiling, 0, sizeof(set_tiling));
1794 /* set_tiling is slightly broken and overwrites the
1795 * input on the error path, so we have to open code
1798 set_tiling.handle = bo_gem->gem_handle;
1799 set_tiling.tiling_mode = tiling_mode;
1800 set_tiling.stride = stride;
1802 ret = ioctl(bufmgr_gem->fd,
1803 DRM_IOCTL_I915_GEM_SET_TILING,
1805 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1809 bo_gem->tiling_mode = set_tiling.tiling_mode;
1810 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1811 bo_gem->stride = set_tiling.stride;
1816 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1819 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1820 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1823 /* Linear buffers have no stride. By ensuring that we only ever use
1824 * stride 0 with linear buffers, we simplify our code.
1826 if (*tiling_mode == I915_TILING_NONE)
1829 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
1831 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1833 *tiling_mode = bo_gem->tiling_mode;
1838 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1839 uint32_t * swizzle_mode)
1841 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1843 *tiling_mode = bo_gem->tiling_mode;
1844 *swizzle_mode = bo_gem->swizzle_mode;
1849 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1851 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1852 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1853 struct drm_gem_flink flink;
1856 if (!bo_gem->global_name) {
1857 memset(&flink, 0, sizeof(flink));
1858 flink.handle = bo_gem->gem_handle;
1860 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1863 bo_gem->global_name = flink.name;
1864 bo_gem->reusable = false;
1866 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1869 *name = bo_gem->global_name;
1874 * Enables unlimited caching of buffer objects for reuse.
1876 * This is potentially very memory expensive, as the cache at each bucket
1877 * size is only bounded by how many buffers of that size we've managed to have
1878 * in flight at once.
1881 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1883 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1885 bufmgr_gem->bo_reuse = true;
1889 * Enable use of fenced reloc type.
1891 * New code should enable this to avoid unnecessary fence register
1892 * allocation. If this option is not enabled, all relocs will have a fence
1893 * register allocated.
1896 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1898 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
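/* Per-object fence requests are only expressible through execbuffer2, so
 * this is a no-op on kernels that lack that ioctl. */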
1900 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1901 bufmgr_gem->fenced_relocs = true;
1905 * Return the additional aperture space required by the tree of buffer objects
1909 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1911 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1915 if (bo == NULL || bo_gem->included_in_check_aperture)
1919 bo_gem->included_in_check_aperture = true;
1921 for (i = 0; i < bo_gem->reloc_count; i++)
1923 drm_intel_gem_bo_get_aperture_space(bo_gem->
1924 reloc_target_info[i].bo);
1930 * Count the number of buffers in this list that need a fence reg
1932 * If the count is greater than the number of available regs, we'll have
1933 * to ask the caller to resubmit a batch with fewer tiled buffers.
1935 * This function over-counts if the same buffer is used multiple times.
1938 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1941 unsigned int total = 0;
1943 for (i = 0; i < count; i++) {
1944 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1949 total += bo_gem->reloc_tree_fences;
1955 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1956 * for the next drm_intel_bufmgr_check_aperture_space() call.
1959 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1961 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1964 if (bo == NULL || !bo_gem->included_in_check_aperture)
1967 bo_gem->included_in_check_aperture = false;
1969 for (i = 0; i < bo_gem->reloc_count; i++)
1970 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1971 reloc_target_info[i].bo);
1975 * Return a conservative estimate for the amount of aperture required
1976 * for a collection of buffers. This may double-count some buffers.
1979 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1982 unsigned int total = 0;
1984 for (i = 0; i < count; i++) {
1985 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1987 total += bo_gem->reloc_tree_size;
1993 * Return the amount of aperture needed for a collection of buffers.
1994 * This avoids double counting any buffers, at the cost of looking
1995 * at every buffer in the set.
1998 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2001 unsigned int total = 0;
2003 for (i = 0; i < count; i++) {
2004 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2005 /* For the first buffer object in the array, we get an
2006 * accurate count back for its reloc_tree size (since nothing
2007 * had been flagged as being counted yet). We can save that
2008 * value out as a more conservative reloc_tree_size that
2009 * avoids double-counting target buffers. Since the first
2010 * buffer happens to usually be the batch buffer in our
2011 * callers, this can pull us back from doing the tree
2012 * walk on every new batch emit.
2015 drm_intel_bo_gem *bo_gem =
2016 (drm_intel_bo_gem *) bo_array[i];
2017 bo_gem->reloc_tree_size = total;
2021 for (i = 0; i < count; i++)
2022 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2027 * Return -1 if the batchbuffer should be flushed before attempting to
2028 * emit rendering referencing the buffers pointed to by bo_array.
2030 * This is required because if we try to emit a batchbuffer with relocations
2031 * to a tree of buffers that won't simultaneously fit in the aperture,
2032 * the rendering will return an error at a point where the software is not
2033 * prepared to recover from it.
2035 * However, we also want to emit the batchbuffer significantly before we reach
2036 * the limit, as a series of batchbuffers each of which references buffers
2037 * covering almost all of the aperture means that at each emit we end up
2038 * waiting to evict a buffer from the last rendering, so performance becomes
2039 * effectively synchronous. By emitting smaller batchbuffers, we eat some CPU overhead to
2040 * get better parallelism.
2043 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2045 drm_intel_bufmgr_gem *bufmgr_gem =
2046 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2047 unsigned int total = 0;
2048 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2051 /* Check for fence reg constraints if necessary */
2052 if (bufmgr_gem->available_fences) {
2053 total_fences = drm_intel_gem_total_fences(bo_array, count);
2054 if (total_fences > bufmgr_gem->available_fences)
2058 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2060 if (total > threshold)
2061 total = drm_intel_gem_compute_batch_space(bo_array, count);
2063 if (total > threshold) {
2064 DBG("check_space: overflowed available aperture, "
2066 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2069 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2070 (int)bufmgr_gem->gtt_size / 1024);
2076 * Disable buffer reuse for objects which are shared with the kernel
2077 * as scanout buffers
2080 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2082 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2084 bo_gem->reusable = false;
2089 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2091 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2093 return bo_gem->reusable;
2097 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2099 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2102 for (i = 0; i < bo_gem->reloc_count; i++) {
2103 if (bo_gem->reloc_target_info[i].bo == target_bo)
2105 if (bo == bo_gem->reloc_target_info[i].bo)
2107 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2115 /** Return true if target_bo is referenced by bo's relocation tree. */
2117 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2119 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2121 if (bo == NULL || target_bo == NULL)
2123 if (target_bo_gem->used_as_reloc_target)
2124 return _drm_intel_gem_bo_references(bo, target_bo);
2129 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2131 unsigned int i = bufmgr_gem->num_buckets;
2133 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2135 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2136 bufmgr_gem->cache_bucket[i].size = size;
2137 bufmgr_gem->num_buckets++;
2141 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2143 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2145 /* OK, so power of two buckets was too wasteful of memory.
2146 * Give 3 other sizes between each power of two, to hopefully
2147 * cover things accurately enough. (The alternative is
2148 * probably to just go for exact matching of sizes, and assume
2149 * that for things like composited window resize the tiled
2150 * width/height alignment and rounding of sizes to pages will
2151 * get us useful cache hit rates anyway)
2153 add_bucket(bufmgr_gem, 4096);
2154 add_bucket(bufmgr_gem, 4096 * 2);
2155 add_bucket(bufmgr_gem, 4096 * 3);
2157 /* Initialize the linked lists for BO reuse cache. */
2158 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2159 add_bucket(bufmgr_gem, size);
2161 add_bucket(bufmgr_gem, size + size * 1 / 4);
2162 add_bucket(bufmgr_gem, size + size * 2 / 4);
2163 add_bucket(bufmgr_gem, size + size * 3 / 4);
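/* This yields bucket sizes of 4KB, 8KB, 12KB, then each power of two with
 * three intermediate steps: 16/20/24/28KB, 32/40/48/56KB, ... up to 64MB. */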
2168 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
2169 * and manage buffer objects.
2171 * \param fd File descriptor of the opened DRM device.
2174 drm_intel_bufmgr_gem_init(int fd, int batch_size)
2176 drm_intel_bufmgr_gem *bufmgr_gem;
2177 struct drm_i915_gem_get_aperture aperture;
2178 drm_i915_getparam_t gp;
2182 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
2183 if (bufmgr_gem == NULL)
2186 bufmgr_gem->fd = fd;
2188 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2193 ret = drmIoctl(bufmgr_gem->fd,
2194 DRM_IOCTL_I915_GEM_GET_APERTURE,
2198 bufmgr_gem->gtt_size = aperture.aper_available_size;
2200 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
2202 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2203 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2204 "May lead to reduced performance or incorrect "
2206 (int)bufmgr_gem->gtt_size / 1024);
2209 gp.param = I915_PARAM_CHIPSET_ID;
2210 gp.value = &bufmgr_gem->pci_device;
2211 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2213 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2214 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2217 if (IS_GEN2(bufmgr_gem))
2218 bufmgr_gem->gen = 2;
2219 else if (IS_GEN3(bufmgr_gem))
2220 bufmgr_gem->gen = 3;
2221 else if (IS_GEN4(bufmgr_gem))
2222 bufmgr_gem->gen = 4;
2224 bufmgr_gem->gen = 6;
2228 gp.param = I915_PARAM_HAS_EXECBUF2;
2229 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2233 gp.param = I915_PARAM_HAS_BSD;
2234 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2235 bufmgr_gem->has_bsd = ret == 0;
2237 gp.param = I915_PARAM_HAS_BLT;
2238 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2239 bufmgr_gem->has_blt = ret == 0;
2241 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
2242 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2243 bufmgr_gem->has_relaxed_fencing = ret == 0;
2245 if (bufmgr_gem->gen < 4) {
2246 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2247 gp.value = &bufmgr_gem->available_fences;
2248 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2250 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2252 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2254 bufmgr_gem->available_fences = 0;
2256 /* XXX The kernel reports the total number of fences,
2257 * including any that may be pinned.
2259 * We presume that there will be at least one pinned
2260 * fence for the scanout buffer, but there may be more
2261 * than one scanout and the user may be manually
2262 * pinning buffers. Let's move to execbuffer2 and
2263 * thereby forget the insanity of using fences...
2265 bufmgr_gem->available_fences -= 2;
2266 if (bufmgr_gem->available_fences < 0)
2267 bufmgr_gem->available_fences = 0;
2271 /* Let's go with one relocation per every 2 dwords (but round down a bit
2272 * since a power of two will mean an extra page allocation for the reloc
2275 * Every 4 was too few for the blender benchmark.
2277 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
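/* For example, a 16KB batch (4096 dwords) gives max_relocs = 4096 / 2 - 2 = 2046. */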
2279 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2280 bufmgr_gem->bufmgr.bo_alloc_for_render =
2281 drm_intel_gem_bo_alloc_for_render;
2282 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2283 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2284 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2285 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2286 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2287 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2288 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2289 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2290 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2291 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2292 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2293 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2294 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2295 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2296 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2297 /* Use the new one if available */
2299 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2300 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
2302 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2303 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2304 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2305 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2306 bufmgr_gem->bufmgr.debug = 0;
2307 bufmgr_gem->bufmgr.check_aperture_space =
2308 drm_intel_gem_check_aperture_space;
2309 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2310 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
2311 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2312 drm_intel_gem_get_pipe_from_crtc_id;
2313 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2315 DRMINITLISTHEAD(&bufmgr_gem->named);
2316 init_cache_buckets(bufmgr_gem);
2318 return &bufmgr_gem->bufmgr;
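/*
 * A minimal usage sketch (illustrative only, not part of this file), assuming
 * an already-opened DRM fd and a 16KB batch size:
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	drm_intel_bo_map(bo, 1);		(CPU map, write enabled)
 *	memset(bo->virtual, 0, 4096);
 *	drm_intel_bo_unmap(bo);
 *	drm_intel_bo_unreference(bo);
 *
 *	drm_intel_bufmgr_destroy(bufmgr);
 */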