1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
53 #include <sys/types.h>
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
61 #include "intel_aub.h"
74 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
76 #define DBG(...) do { \
77 if (bufmgr_gem->bufmgr.debug) \
78 fprintf(stderr, __VA_ARGS__); \
81 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
83 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
85 struct drm_intel_gem_bo_bucket {
90 typedef struct _drm_intel_bufmgr_gem {
91 drm_intel_bufmgr bufmgr;
99 struct drm_i915_gem_exec_object *exec_objects;
100 struct drm_i915_gem_exec_object2 *exec2_objects;
101 drm_intel_bo **exec_bos;
105 /** Array of lists of cached gem objects of power-of-two sizes */
106 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
111 drmMMListHead vma_cache;
112 int vma_count, vma_open, vma_max;
115 int available_fences;
118 unsigned int has_bsd : 1;
119 unsigned int has_blt : 1;
120 unsigned int has_relaxed_fencing : 1;
121 unsigned int has_llc : 1;
122 unsigned int bo_reuse : 1;
123 unsigned int no_exec : 1;
128 } drm_intel_bufmgr_gem;
130 #define DRM_INTEL_RELOC_FENCE (1<<0)
132 typedef struct _drm_intel_reloc_target_info {
135 } drm_intel_reloc_target;
137 struct _drm_intel_bo_gem {
	 * Kernel-assigned global name for this object
147 unsigned int global_name;
148 drmMMListHead name_list;
151 * Index of the buffer within the validation list while preparing a
152 * batchbuffer execution.
157 * Current tiling mode
159 uint32_t tiling_mode;
160 uint32_t swizzle_mode;
161 unsigned long stride;
165 /** Array passed to the DRM containing relocation information. */
166 struct drm_i915_gem_relocation_entry *relocs;
168 * Array of info structs corresponding to relocs[i].target_handle etc
170 drm_intel_reloc_target *reloc_target_info;
171 /** Number of entries in relocs */
173 /** Mapped address for the buffer, saved across map/unmap cycles */
175 /** GTT virtual address for the buffer, saved across map/unmap cycles */
178 drmMMListHead vma_list;
184 * Boolean of whether this BO and its children have been included in
185 * the current drm_intel_bufmgr_check_aperture_space() total.
187 bool included_in_check_aperture;
190 * Boolean of whether this buffer has been used as a relocation
191 * target and had its size accounted for, and thus can't have any
192 * further relocations added to it.
194 bool used_as_reloc_target;
197 * Boolean of whether we have encountered an error whilst building the relocation tree.
202 * Boolean of whether this buffer can be re-used
	 * Size in bytes of this buffer and its relocation descendants.
209 * Used to avoid costly tree walking in
210 * drm_intel_bufmgr_check_aperture in the common case.
215 * Number of potential fence registers required by this buffer and its
218 int reloc_tree_fences;
	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
221 bool mapped_cpu_write;
227 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
230 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
233 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
234 uint32_t * swizzle_mode);
237 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
238 uint32_t tiling_mode,
241 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
244 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
246 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
249 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
250 uint32_t *tiling_mode)
252 unsigned long min_size, max_size;
255 if (*tiling_mode == I915_TILING_NONE)
258 /* 965+ just need multiples of page size for tiling */
259 if (bufmgr_gem->gen >= 4)
260 return ROUND_UP_TO(size, 4096);
262 /* Older chips need powers of two, of at least 512k or 1M */
263 if (bufmgr_gem->gen == 3) {
264 min_size = 1024*1024;
265 max_size = 128*1024*1024;
268 max_size = 64*1024*1024;
271 if (size > max_size) {
272 *tiling_mode = I915_TILING_NONE;
276 /* Do we need to allocate every page for the fence? */
277 if (bufmgr_gem->has_relaxed_fencing)
278 return ROUND_UP_TO(size, 4096);
280 for (i = min_size; i < size; i <<= 1)
287 * Round a given pitch up to the minimum required for X tiling on a
288 * given chip. We use 512 as the minimum to allow for a later tiling
292 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
293 unsigned long pitch, uint32_t *tiling_mode)
295 unsigned long tile_width;
298 /* If untiled, then just align it so that we can do rendering
299 * to it with the 3D engine.
301 if (*tiling_mode == I915_TILING_NONE)
302 return ALIGN(pitch, 64);
304 if (*tiling_mode == I915_TILING_X
305 || (IS_915(bufmgr_gem->pci_device)
306 && *tiling_mode == I915_TILING_Y))
311 /* 965 is flexible */
312 if (bufmgr_gem->gen >= 4)
313 return ROUND_UP_TO(pitch, tile_width);
315 /* The older hardware has a maximum pitch of 8192 with tiled
316 * surfaces, so fallback to untiled if it's too large.
319 *tiling_mode = I915_TILING_NONE;
320 return ALIGN(pitch, 64);
323 /* Pre-965 needs power of two tile width */
324 for (i = tile_width; i < pitch; i <<= 1)
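/*
 * For reference, the rounding helpers used above behave like the
 * following sketch (the real ROUND_UP_TO/ALIGN macros are defined
 * elsewhere in libdrm; this is only an illustration of the math, not
 * the canonical definitions):
 *
 *	ROUND_UP_TO(x, y)  ->  ((x + y - 1) / y) * y
 *	ALIGN(x, y)        ->  (x + y - 1) & ~(y - 1)   (y a power of two)
 *
 * so ROUND_UP_TO(size, 4096) pads an allocation up to whole pages and
 * ALIGN(pitch, 64) pads an untiled pitch to a 64-byte boundary.
 */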
330 static struct drm_intel_gem_bo_bucket *
331 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
336 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
337 struct drm_intel_gem_bo_bucket *bucket =
338 &bufmgr_gem->cache_bucket[i];
339 if (bucket->size >= size) {
348 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
352 for (i = 0; i < bufmgr_gem->exec_count; i++) {
353 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
354 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
356 if (bo_gem->relocs == NULL) {
357 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
362 for (j = 0; j < bo_gem->reloc_count; j++) {
363 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
364 drm_intel_bo_gem *target_gem =
365 (drm_intel_bo_gem *) target_bo;
367 DBG("%2d: %d (%s)@0x%08llx -> "
368 "%d (%s)@0x%08lx + 0x%08x\n",
370 bo_gem->gem_handle, bo_gem->name,
371 (unsigned long long)bo_gem->relocs[j].offset,
372 target_gem->gem_handle,
375 bo_gem->relocs[j].delta);
381 drm_intel_gem_bo_reference(drm_intel_bo *bo)
383 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
385 atomic_inc(&bo_gem->refcount);
389 * Adds the given buffer to the list of buffers to be validated (moved into the
390 * appropriate memory type) with the next batch submission.
392 * If a buffer is validated multiple times in a batch submission, it ends up
393 * with the intersection of the memory type flags and the union of the
397 drm_intel_add_validate_buffer(drm_intel_bo *bo)
399 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
400 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
403 if (bo_gem->validate_index != -1)
406 /* Extend the array of validation entries as necessary. */
407 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
408 int new_size = bufmgr_gem->exec_size * 2;
413 bufmgr_gem->exec_objects =
414 realloc(bufmgr_gem->exec_objects,
415 sizeof(*bufmgr_gem->exec_objects) * new_size);
416 bufmgr_gem->exec_bos =
417 realloc(bufmgr_gem->exec_bos,
418 sizeof(*bufmgr_gem->exec_bos) * new_size);
419 bufmgr_gem->exec_size = new_size;
422 index = bufmgr_gem->exec_count;
423 bo_gem->validate_index = index;
424 /* Fill in array entry */
425 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
426 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
427 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
428 bufmgr_gem->exec_objects[index].alignment = 0;
429 bufmgr_gem->exec_objects[index].offset = 0;
430 bufmgr_gem->exec_bos[index] = bo;
431 bufmgr_gem->exec_count++;
435 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
437 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
438 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
441 if (bo_gem->validate_index != -1) {
443 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
444 EXEC_OBJECT_NEEDS_FENCE;
448 /* Extend the array of validation entries as necessary. */
449 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
450 int new_size = bufmgr_gem->exec_size * 2;
455 bufmgr_gem->exec2_objects =
456 realloc(bufmgr_gem->exec2_objects,
457 sizeof(*bufmgr_gem->exec2_objects) * new_size);
458 bufmgr_gem->exec_bos =
459 realloc(bufmgr_gem->exec_bos,
460 sizeof(*bufmgr_gem->exec_bos) * new_size);
461 bufmgr_gem->exec_size = new_size;
464 index = bufmgr_gem->exec_count;
465 bo_gem->validate_index = index;
466 /* Fill in array entry */
467 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
468 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
469 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
470 bufmgr_gem->exec2_objects[index].alignment = 0;
471 bufmgr_gem->exec2_objects[index].offset = 0;
472 bufmgr_gem->exec_bos[index] = bo;
473 bufmgr_gem->exec2_objects[index].flags = 0;
474 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
475 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
477 bufmgr_gem->exec2_objects[index].flags |=
478 EXEC_OBJECT_NEEDS_FENCE;
480 bufmgr_gem->exec_count++;
483 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
487 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
488 drm_intel_bo_gem *bo_gem)
492 assert(!bo_gem->used_as_reloc_target);
	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
496 * This means that in the worst possible case we will need a hole
497 * twice as large as the object in order for it to fit into the
498 * aperture. Optimal packing is for wimps.
500 size = bo_gem->bo.size;
501 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
504 if (bufmgr_gem->has_relaxed_fencing) {
505 if (bufmgr_gem->gen == 3)
506 min_size = 1024*1024;
510 while (min_size < size)
515 /* Account for worst-case alignment. */
519 bo_gem->reloc_tree_size = size;
523 drm_intel_setup_reloc_list(drm_intel_bo *bo)
525 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
526 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
527 unsigned int max_relocs = bufmgr_gem->max_relocs;
529 if (bo->size / 4 < max_relocs)
530 max_relocs = bo->size / 4;
532 bo_gem->relocs = malloc(max_relocs *
533 sizeof(struct drm_i915_gem_relocation_entry));
534 bo_gem->reloc_target_info = malloc(max_relocs *
535 sizeof(drm_intel_reloc_target));
536 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
537 bo_gem->has_error = true;
539 free (bo_gem->relocs);
540 bo_gem->relocs = NULL;
542 free (bo_gem->reloc_target_info);
543 bo_gem->reloc_target_info = NULL;
552 drm_intel_gem_bo_busy(drm_intel_bo *bo)
554 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
555 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
556 struct drm_i915_gem_busy busy;
560 busy.handle = bo_gem->gem_handle;
562 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
564 return (ret == 0 && busy.busy);
568 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
569 drm_intel_bo_gem *bo_gem, int state)
571 struct drm_i915_gem_madvise madv;
574 madv.handle = bo_gem->gem_handle;
577 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
579 return madv.retained;
583 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
585 return drm_intel_gem_bo_madvise_internal
586 ((drm_intel_bufmgr_gem *) bo->bufmgr,
587 (drm_intel_bo_gem *) bo,
591 /* drop the oldest entries that have been purged by the kernel */
593 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
594 struct drm_intel_gem_bo_bucket *bucket)
596 while (!DRMLISTEMPTY(&bucket->head)) {
597 drm_intel_bo_gem *bo_gem;
599 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
600 bucket->head.next, head);
601 if (drm_intel_gem_bo_madvise_internal
602 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
605 DRMLISTDEL(&bo_gem->head);
606 drm_intel_gem_bo_free(&bo_gem->bo);
610 static drm_intel_bo *
611 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
615 uint32_t tiling_mode,
616 unsigned long stride)
618 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
619 drm_intel_bo_gem *bo_gem;
620 unsigned int page_size = getpagesize();
622 struct drm_intel_gem_bo_bucket *bucket;
623 bool alloc_from_cache;
624 unsigned long bo_size;
625 bool for_render = false;
627 if (flags & BO_ALLOC_FOR_RENDER)
630 /* Round the allocated size up to a power of two number of pages. */
631 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
633 /* If we don't have caching at this size, don't actually round the
636 if (bucket == NULL) {
638 if (bo_size < page_size)
641 bo_size = bucket->size;
644 pthread_mutex_lock(&bufmgr_gem->lock);
645 /* Get a buffer out of the cache if available */
647 alloc_from_cache = false;
648 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
650 /* Allocate new render-target BOs from the tail (MRU)
651 * of the list, as it will likely be hot in the GPU
652 * cache and in the aperture for us.
654 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
655 bucket->head.prev, head);
656 DRMLISTDEL(&bo_gem->head);
657 alloc_from_cache = true;
659 /* For non-render-target BOs (where we're probably
660 * going to map it first thing in order to fill it
661 * with data), check if the last BO in the cache is
662 * unbusy, and only reuse in that case. Otherwise,
663 * allocating a new buffer is probably faster than
664 * waiting for the GPU to finish.
666 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
667 bucket->head.next, head);
668 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
669 alloc_from_cache = true;
670 DRMLISTDEL(&bo_gem->head);
674 if (alloc_from_cache) {
675 if (!drm_intel_gem_bo_madvise_internal
676 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
677 drm_intel_gem_bo_free(&bo_gem->bo);
678 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
683 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
686 drm_intel_gem_bo_free(&bo_gem->bo);
691 pthread_mutex_unlock(&bufmgr_gem->lock);
693 if (!alloc_from_cache) {
694 struct drm_i915_gem_create create;
696 bo_gem = calloc(1, sizeof(*bo_gem));
700 bo_gem->bo.size = bo_size;
703 create.size = bo_size;
705 ret = drmIoctl(bufmgr_gem->fd,
706 DRM_IOCTL_I915_GEM_CREATE,
708 bo_gem->gem_handle = create.handle;
709 bo_gem->bo.handle = bo_gem->gem_handle;
714 bo_gem->bo.bufmgr = bufmgr;
716 bo_gem->tiling_mode = I915_TILING_NONE;
717 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
720 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
723 drm_intel_gem_bo_free(&bo_gem->bo);
727 DRMINITLISTHEAD(&bo_gem->name_list);
728 DRMINITLISTHEAD(&bo_gem->vma_list);
732 atomic_set(&bo_gem->refcount, 1);
733 bo_gem->validate_index = -1;
734 bo_gem->reloc_tree_fences = 0;
735 bo_gem->used_as_reloc_target = false;
736 bo_gem->has_error = false;
737 bo_gem->reusable = true;
739 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
741 DBG("bo_create: buf %d (%s) %ldb\n",
742 bo_gem->gem_handle, bo_gem->name, size);
747 static drm_intel_bo *
748 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
751 unsigned int alignment)
753 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
755 I915_TILING_NONE, 0);
758 static drm_intel_bo *
759 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
762 unsigned int alignment)
764 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
765 I915_TILING_NONE, 0);
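/*
 * Usage sketch (illustrative only, error handling omitted): callers do
 * not reach drm_intel_gem_bo_alloc() directly but through the public
 * drm_intel_bo_alloc() entry point on a GEM bufmgr, e.g.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch",
 *					      64 * 1024, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */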
768 static drm_intel_bo *
769 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
770 int x, int y, int cpp, uint32_t *tiling_mode,
771 unsigned long *pitch, unsigned long flags)
773 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
774 unsigned long size, stride;
778 unsigned long aligned_y, height_alignment;
780 tiling = *tiling_mode;
782 /* If we're tiled, our allocations are in 8 or 32-row blocks,
783 * so failure to align our height means that we won't allocate
786 * If we're untiled, we still have to align to 2 rows high
787 * because the data port accesses 2x2 blocks even if the
788 * bottom row isn't to be rendered, so failure to align means
789 * we could walk off the end of the GTT and fault. This is
790 * documented on 965, and may be the case on older chipsets
791 * too so we try to be careful.
794 height_alignment = 2;
796 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
797 height_alignment = 16;
798 else if (tiling == I915_TILING_X
799 || (IS_915(bufmgr_gem->pci_device)
800 && tiling == I915_TILING_Y))
801 height_alignment = 8;
802 else if (tiling == I915_TILING_Y)
803 height_alignment = 32;
804 aligned_y = ALIGN(y, height_alignment);
807 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
808 size = stride * aligned_y;
809 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
810 } while (*tiling_mode != tiling);
813 if (tiling == I915_TILING_NONE)
816 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
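/*
 * Usage sketch (illustrative only): the caller requests a tiling mode
 * and reads back what was actually granted, since both the tiling mode
 * and the pitch may be adjusted by the code above.  For a hypothetical
 * 1024x768, 4-byte-per-pixel surface:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "front",
 *						    1024, 768, 4,
 *						    &tiling, &pitch, 0);
 *
 * On return, tiling may have been demoted to I915_TILING_NONE and
 * pitch rounded up, so read both back instead of assuming the request.
 */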
821 * Returns a drm_intel_bo wrapping the given buffer object handle.
823 * This can be used when one application needs to pass a buffer object
827 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
831 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
832 drm_intel_bo_gem *bo_gem;
834 struct drm_gem_open open_arg;
835 struct drm_i915_gem_get_tiling get_tiling;
	/* At the moment most applications only have a few named bos.
839 * For instance, in a DRI client only the render buffers passed
840 * between X and the client are named. And since X returns the
841 * alternating names for the front/back buffer a linear search
842 * provides a sufficiently fast match.
844 for (list = bufmgr_gem->named.next;
845 list != &bufmgr_gem->named;
847 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
848 if (bo_gem->global_name == handle) {
849 drm_intel_gem_bo_reference(&bo_gem->bo);
854 bo_gem = calloc(1, sizeof(*bo_gem));
859 open_arg.name = handle;
860 ret = drmIoctl(bufmgr_gem->fd,
864 DBG("Couldn't reference %s handle 0x%08x: %s\n",
865 name, handle, strerror(errno));
869 bo_gem->bo.size = open_arg.size;
870 bo_gem->bo.offset = 0;
871 bo_gem->bo.virtual = NULL;
872 bo_gem->bo.bufmgr = bufmgr;
874 atomic_set(&bo_gem->refcount, 1);
875 bo_gem->validate_index = -1;
876 bo_gem->gem_handle = open_arg.handle;
877 bo_gem->bo.handle = open_arg.handle;
878 bo_gem->global_name = handle;
879 bo_gem->reusable = false;
881 VG_CLEAR(get_tiling);
882 get_tiling.handle = bo_gem->gem_handle;
883 ret = drmIoctl(bufmgr_gem->fd,
884 DRM_IOCTL_I915_GEM_GET_TILING,
887 drm_intel_gem_bo_unreference(&bo_gem->bo);
890 bo_gem->tiling_mode = get_tiling.tiling_mode;
891 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
892 /* XXX stride is unknown */
893 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
895 DRMINITLISTHEAD(&bo_gem->vma_list);
896 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
897 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
903 drm_intel_gem_bo_free(drm_intel_bo *bo)
905 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
906 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
907 struct drm_gem_close close;
910 DRMLISTDEL(&bo_gem->vma_list);
911 if (bo_gem->mem_virtual) {
912 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
913 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
914 bufmgr_gem->vma_count--;
916 if (bo_gem->gtt_virtual) {
917 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
918 bufmgr_gem->vma_count--;
921 /* Close this object */
923 close.handle = bo_gem->gem_handle;
924 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
926 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
927 bo_gem->gem_handle, bo_gem->name, strerror(errno));
933 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
936 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
938 if (bo_gem->mem_virtual)
939 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
941 if (bo_gem->gtt_virtual)
942 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
946 /** Frees all cached buffers significantly older than @time. */
948 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
952 if (bufmgr_gem->time == time)
955 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
956 struct drm_intel_gem_bo_bucket *bucket =
957 &bufmgr_gem->cache_bucket[i];
959 while (!DRMLISTEMPTY(&bucket->head)) {
960 drm_intel_bo_gem *bo_gem;
962 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
963 bucket->head.next, head);
964 if (time - bo_gem->free_time <= 1)
967 DRMLISTDEL(&bo_gem->head);
969 drm_intel_gem_bo_free(&bo_gem->bo);
973 bufmgr_gem->time = time;
976 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
980 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
981 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
983 if (bufmgr_gem->vma_max < 0)
986 /* We may need to evict a few entries in order to create new mmaps */
987 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
991 while (bufmgr_gem->vma_count > limit) {
992 drm_intel_bo_gem *bo_gem;
994 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
995 bufmgr_gem->vma_cache.next,
997 assert(bo_gem->map_count == 0);
998 DRMLISTDELINIT(&bo_gem->vma_list);
1000 if (bo_gem->mem_virtual) {
1001 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1002 bo_gem->mem_virtual = NULL;
1003 bufmgr_gem->vma_count--;
1005 if (bo_gem->gtt_virtual) {
1006 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1007 bo_gem->gtt_virtual = NULL;
1008 bufmgr_gem->vma_count--;
1013 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1014 drm_intel_bo_gem *bo_gem)
1016 bufmgr_gem->vma_open--;
1017 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1018 if (bo_gem->mem_virtual)
1019 bufmgr_gem->vma_count++;
1020 if (bo_gem->gtt_virtual)
1021 bufmgr_gem->vma_count++;
1022 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1025 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1026 drm_intel_bo_gem *bo_gem)
1028 bufmgr_gem->vma_open++;
1029 DRMLISTDEL(&bo_gem->vma_list);
1030 if (bo_gem->mem_virtual)
1031 bufmgr_gem->vma_count--;
1032 if (bo_gem->gtt_virtual)
1033 bufmgr_gem->vma_count--;
1034 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1038 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1040 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1041 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1042 struct drm_intel_gem_bo_bucket *bucket;
1045 /* Unreference all the target buffers */
1046 for (i = 0; i < bo_gem->reloc_count; i++) {
1047 if (bo_gem->reloc_target_info[i].bo != bo) {
1048 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1049 reloc_target_info[i].bo,
1053 bo_gem->reloc_count = 0;
1054 bo_gem->used_as_reloc_target = false;
1056 DBG("bo_unreference final: %d (%s)\n",
1057 bo_gem->gem_handle, bo_gem->name);
1059 /* release memory associated with this object */
1060 if (bo_gem->reloc_target_info) {
1061 free(bo_gem->reloc_target_info);
1062 bo_gem->reloc_target_info = NULL;
1064 if (bo_gem->relocs) {
1065 free(bo_gem->relocs);
1066 bo_gem->relocs = NULL;
1069 /* Clear any left-over mappings */
1070 if (bo_gem->map_count) {
1071 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1072 bo_gem->map_count = 0;
1073 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1074 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1077 DRMLISTDEL(&bo_gem->name_list);
1079 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1080 /* Put the buffer into our internal cache for reuse if we can. */
1081 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1082 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1083 I915_MADV_DONTNEED)) {
1084 bo_gem->free_time = time;
1086 bo_gem->name = NULL;
1087 bo_gem->validate_index = -1;
1089 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1091 drm_intel_gem_bo_free(bo);
1095 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1098 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1100 assert(atomic_read(&bo_gem->refcount) > 0);
1101 if (atomic_dec_and_test(&bo_gem->refcount))
1102 drm_intel_gem_bo_unreference_final(bo, time);
1105 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1107 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1109 assert(atomic_read(&bo_gem->refcount) > 0);
1110 if (atomic_dec_and_test(&bo_gem->refcount)) {
1111 drm_intel_bufmgr_gem *bufmgr_gem =
1112 (drm_intel_bufmgr_gem *) bo->bufmgr;
1113 struct timespec time;
1115 clock_gettime(CLOCK_MONOTONIC, &time);
1117 pthread_mutex_lock(&bufmgr_gem->lock);
1118 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1119 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1120 pthread_mutex_unlock(&bufmgr_gem->lock);
1124 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1126 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1127 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1128 struct drm_i915_gem_set_domain set_domain;
1131 pthread_mutex_lock(&bufmgr_gem->lock);
1133 if (bo_gem->map_count++ == 0)
1134 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1136 if (!bo_gem->mem_virtual) {
1137 struct drm_i915_gem_mmap mmap_arg;
1139 DBG("bo_map: %d (%s), map_count=%d\n",
1140 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1143 mmap_arg.handle = bo_gem->gem_handle;
1144 mmap_arg.offset = 0;
1145 mmap_arg.size = bo->size;
1146 ret = drmIoctl(bufmgr_gem->fd,
1147 DRM_IOCTL_I915_GEM_MMAP,
1151 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1152 __FILE__, __LINE__, bo_gem->gem_handle,
1153 bo_gem->name, strerror(errno));
1154 if (--bo_gem->map_count == 0)
1155 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1156 pthread_mutex_unlock(&bufmgr_gem->lock);
1159 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1160 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1162 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1163 bo_gem->mem_virtual);
1164 bo->virtual = bo_gem->mem_virtual;
1166 VG_CLEAR(set_domain);
1167 set_domain.handle = bo_gem->gem_handle;
1168 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1170 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1172 set_domain.write_domain = 0;
1173 ret = drmIoctl(bufmgr_gem->fd,
1174 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1177 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1178 __FILE__, __LINE__, bo_gem->gem_handle,
1183 bo_gem->mapped_cpu_write = true;
1185 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1186 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1187 pthread_mutex_unlock(&bufmgr_gem->lock);
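/*
 * Usage sketch (illustrative only): the public entry points for the
 * code above are drm_intel_bo_map()/drm_intel_bo_unmap(); small
 * uploads can use drm_intel_bo_subdata() and skip the mapping, where
 * "data" stands for some caller-provided staging buffer:
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 *	drm_intel_bo_subdata(bo, 0, sizeof(data), data);
 */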
1193 map_gtt(drm_intel_bo *bo)
1195 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1196 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1199 if (bo_gem->map_count++ == 0)
1200 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1202 /* Get a mapping of the buffer if we haven't before. */
1203 if (bo_gem->gtt_virtual == NULL) {
1204 struct drm_i915_gem_mmap_gtt mmap_arg;
1206 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1207 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1210 mmap_arg.handle = bo_gem->gem_handle;
1212 /* Get the fake offset back... */
1213 ret = drmIoctl(bufmgr_gem->fd,
1214 DRM_IOCTL_I915_GEM_MMAP_GTT,
1218 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1220 bo_gem->gem_handle, bo_gem->name,
1222 if (--bo_gem->map_count == 0)
1223 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1228 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1229 MAP_SHARED, bufmgr_gem->fd,
1231 if (bo_gem->gtt_virtual == MAP_FAILED) {
1232 bo_gem->gtt_virtual = NULL;
1234 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1236 bo_gem->gem_handle, bo_gem->name,
1238 if (--bo_gem->map_count == 0)
1239 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1244 bo->virtual = bo_gem->gtt_virtual;
1246 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1247 bo_gem->gtt_virtual);
1252 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1254 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1255 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1256 struct drm_i915_gem_set_domain set_domain;
1259 pthread_mutex_lock(&bufmgr_gem->lock);
1263 pthread_mutex_unlock(&bufmgr_gem->lock);
1267 /* Now move it to the GTT domain so that the GPU and CPU
1268 * caches are flushed and the GPU isn't actively using the
1271 * The pagefault handler does this domain change for us when
1272 * it has unbound the BO from the GTT, but it's up to us to
1273 * tell it when we're about to use things if we had done
1274 * rendering and it still happens to be bound to the GTT.
1276 VG_CLEAR(set_domain);
1277 set_domain.handle = bo_gem->gem_handle;
1278 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1279 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1280 ret = drmIoctl(bufmgr_gem->fd,
1281 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1284 DBG("%s:%d: Error setting domain %d: %s\n",
1285 __FILE__, __LINE__, bo_gem->gem_handle,
1289 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1290 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1291 pthread_mutex_unlock(&bufmgr_gem->lock);
1297 * Performs a mapping of the buffer object like the normal GTT
1298 * mapping, but avoids waiting for the GPU to be done reading from or
1299 * rendering to the buffer.
1301 * This is used in the implementation of GL_ARB_map_buffer_range: The
1302 * user asks to create a buffer, then does a mapping, fills some
1303 * space, runs a drawing command, then asks to map it again without
1304 * synchronizing because it guarantees that it won't write over the
1305 * data that the GPU is busy using (or, more specifically, that if it
1306 * does write over the data, it acknowledges that rendering is
1310 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1312 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1315 /* If the CPU cache isn't coherent with the GTT, then use a
1316 * regular synchronized mapping. The problem is that we don't
1317 * track where the buffer was last used on the CPU side in
1318 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1319 * we would potentially corrupt the buffer even when the user
1320 * does reasonable things.
1322 if (!bufmgr_gem->has_llc)
1323 return drm_intel_gem_bo_map_gtt(bo);
1325 pthread_mutex_lock(&bufmgr_gem->lock);
1327 pthread_mutex_unlock(&bufmgr_gem->lock);
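/*
 * Usage sketch (illustrative only) of the pattern described above,
 * where "vbo", "free_offset", "verts" and "len" are hypothetical
 * caller-side names: write into a region the GPU is known not to be
 * reading and keep rendering without a stall:
 *
 *	if (drm_intel_gem_bo_map_unsynchronized(vbo) == 0) {
 *		memcpy((char *)vbo->virtual + free_offset, verts, len);
 *		drm_intel_bo_unmap(vbo);
 *	}
 */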
1332 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1334 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1335 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1341 pthread_mutex_lock(&bufmgr_gem->lock);
1343 if (bo_gem->map_count <= 0) {
1344 DBG("attempted to unmap an unmapped bo\n");
1345 pthread_mutex_unlock(&bufmgr_gem->lock);
1346 /* Preserve the old behaviour of just treating this as a
1347 * no-op rather than reporting the error.
1352 if (bo_gem->mapped_cpu_write) {
1353 struct drm_i915_gem_sw_finish sw_finish;
1355 /* Cause a flush to happen if the buffer's pinned for
1356 * scanout, so the results show up in a timely manner.
1357 * Unlike GTT set domains, this only does work if the
1358 * buffer should be scanout-related.
1360 VG_CLEAR(sw_finish);
1361 sw_finish.handle = bo_gem->gem_handle;
1362 ret = drmIoctl(bufmgr_gem->fd,
1363 DRM_IOCTL_I915_GEM_SW_FINISH,
1365 ret = ret == -1 ? -errno : 0;
1367 bo_gem->mapped_cpu_write = false;
	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo, as that will exhaust the system
	 * limits and cause later failures.
1374 if (--bo_gem->map_count == 0) {
1375 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1376 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1379 pthread_mutex_unlock(&bufmgr_gem->lock);
1384 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1386 return drm_intel_gem_bo_unmap(bo);
1390 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1391 unsigned long size, const void *data)
1393 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1394 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1395 struct drm_i915_gem_pwrite pwrite;
1399 pwrite.handle = bo_gem->gem_handle;
1400 pwrite.offset = offset;
1402 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1403 ret = drmIoctl(bufmgr_gem->fd,
1404 DRM_IOCTL_I915_GEM_PWRITE,
1408 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1409 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1410 (int)size, strerror(errno));
1417 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1419 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1420 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1423 VG_CLEAR(get_pipe_from_crtc_id);
1424 get_pipe_from_crtc_id.crtc_id = crtc_id;
1425 ret = drmIoctl(bufmgr_gem->fd,
1426 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1427 &get_pipe_from_crtc_id);
1429 /* We return -1 here to signal that we don't
1430 * know which pipe is associated with this crtc.
1431 * This lets the caller know that this information
1432 * isn't available; using the wrong pipe for
1433 * vblank waiting can cause the chipset to lock up
1438 return get_pipe_from_crtc_id.pipe;
1442 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1443 unsigned long size, void *data)
1445 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1446 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1447 struct drm_i915_gem_pread pread;
1451 pread.handle = bo_gem->gem_handle;
1452 pread.offset = offset;
1454 pread.data_ptr = (uint64_t) (uintptr_t) data;
1455 ret = drmIoctl(bufmgr_gem->fd,
1456 DRM_IOCTL_I915_GEM_PREAD,
1460 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1461 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1462 (int)size, strerror(errno));
1468 /** Waits for all GPU rendering with the object to have completed. */
1470 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1472 drm_intel_gem_bo_start_gtt_access(bo, 1);
1476 * Sets the object to the GTT read and possibly write domain, used by the X
1477 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1479 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1480 * can do tiled pixmaps this way.
1483 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1485 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1486 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1487 struct drm_i915_gem_set_domain set_domain;
1490 VG_CLEAR(set_domain);
1491 set_domain.handle = bo_gem->gem_handle;
1492 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1493 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1494 ret = drmIoctl(bufmgr_gem->fd,
1495 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1498 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1499 __FILE__, __LINE__, bo_gem->gem_handle,
1500 set_domain.read_domains, set_domain.write_domain,
1506 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1508 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1511 free(bufmgr_gem->exec2_objects);
1512 free(bufmgr_gem->exec_objects);
1513 free(bufmgr_gem->exec_bos);
1515 pthread_mutex_destroy(&bufmgr_gem->lock);
1517 /* Free any cached buffer objects we were going to reuse */
1518 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1519 struct drm_intel_gem_bo_bucket *bucket =
1520 &bufmgr_gem->cache_bucket[i];
1521 drm_intel_bo_gem *bo_gem;
1523 while (!DRMLISTEMPTY(&bucket->head)) {
1524 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1525 bucket->head.next, head);
1526 DRMLISTDEL(&bo_gem->head);
1528 drm_intel_gem_bo_free(&bo_gem->bo);
1536 * Adds the target buffer to the validation list and adds the relocation
1537 * to the reloc_buffer's relocation list.
1539 * The relocation entry at the given offset must already contain the
1540 * precomputed relocation value, because the kernel will optimize out
1541 * the relocation entry write when the buffer hasn't moved from the
1542 * last known offset in target_bo.
1545 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1546 drm_intel_bo *target_bo, uint32_t target_offset,
1547 uint32_t read_domains, uint32_t write_domain,
1550 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1551 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1552 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1553 bool fenced_command;
1555 if (bo_gem->has_error)
1558 if (target_bo_gem->has_error) {
1559 bo_gem->has_error = true;
1563 /* We never use HW fences for rendering on 965+ */
1564 if (bufmgr_gem->gen >= 4)
1567 fenced_command = need_fence;
1568 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1571 /* Create a new relocation list if needed */
1572 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1575 /* Check overflow */
1576 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1579 assert(offset <= bo->size - 4);
1580 assert((write_domain & (write_domain - 1)) == 0);
1582 /* Make sure that we're not adding a reloc to something whose size has
1583 * already been accounted for.
1585 assert(!bo_gem->used_as_reloc_target);
1586 if (target_bo_gem != bo_gem) {
1587 target_bo_gem->used_as_reloc_target = true;
1588 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1590 /* An object needing a fence is a tiled buffer, so it won't have
1591 * relocs to other buffers.
1594 target_bo_gem->reloc_tree_fences = 1;
1595 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1597 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1598 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1599 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1600 target_bo_gem->gem_handle;
1601 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1602 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1603 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1605 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1606 if (target_bo != bo)
1607 drm_intel_gem_bo_reference(target_bo);
1609 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1610 DRM_INTEL_RELOC_FENCE;
1612 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1614 bo_gem->reloc_count++;
1620 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1621 drm_intel_bo *target_bo, uint32_t target_offset,
1622 uint32_t read_domains, uint32_t write_domain)
1624 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1626 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1627 read_domains, write_domain,
1628 !bufmgr_gem->fenced_relocs);
1632 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1633 drm_intel_bo *target_bo,
1634 uint32_t target_offset,
1635 uint32_t read_domains, uint32_t write_domain)
1637 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1638 read_domains, write_domain, true);
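/*
 * Usage sketch (illustrative only): a driver building a batchbuffer
 * writes the presumed target address into the command stream and then
 * records the reference so the kernel can patch it if the target BO
 * moves ("batch", "batch_bo", "target", "n" and "delta" are
 * hypothetical caller-side names):
 *
 *	batch[n] = target->offset + delta;
 *	drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *				target, delta,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */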
1642 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1644 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1646 return bo_gem->reloc_count;
1650 * Removes existing relocation entries in the BO after "start".
1652 * This allows a user to avoid a two-step process for state setup with
1653 * counting up all the buffer objects and doing a
1654 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1655 * relocations for the state setup. Instead, save the state of the
1656 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1657 * state, and then check if it still fits in the aperture.
1659 * Any further drm_intel_bufmgr_check_aperture_space() queries
1660 * involving this buffer in the tree are undefined after this call.
1663 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1665 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1667 struct timespec time;
1669 clock_gettime(CLOCK_MONOTONIC, &time);
1671 assert(bo_gem->reloc_count >= start);
1672 /* Unreference the cleared target buffers */
1673 for (i = start; i < bo_gem->reloc_count; i++) {
1674 if (bo_gem->reloc_target_info[i].bo != bo) {
1675 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1676 reloc_target_info[i].bo,
1680 bo_gem->reloc_count = start;
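/*
 * Usage sketch (illustrative only) of the pattern described in the
 * comment above ("emit_state" and "flush_batch" are hypothetical
 * driver helpers):
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 *		flush_batch();
 *	}
 */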
1684 * Walk the tree of relocations rooted at BO and accumulate the list of
1685 * validations to be performed and update the relocation buffers with
1686 * index values into the validation list.
1689 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1691 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1694 if (bo_gem->relocs == NULL)
1697 for (i = 0; i < bo_gem->reloc_count; i++) {
1698 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1700 if (target_bo == bo)
1703 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1705 /* Continue walking the tree depth-first. */
1706 drm_intel_gem_bo_process_reloc(target_bo);
1708 /* Add the target to the validate list */
1709 drm_intel_add_validate_buffer(target_bo);
1714 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1716 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1719 if (bo_gem->relocs == NULL)
1722 for (i = 0; i < bo_gem->reloc_count; i++) {
1723 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1726 if (target_bo == bo)
1729 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1731 /* Continue walking the tree depth-first. */
1732 drm_intel_gem_bo_process_reloc2(target_bo);
1734 need_fence = (bo_gem->reloc_target_info[i].flags &
1735 DRM_INTEL_RELOC_FENCE);
1737 /* Add the target to the validate list */
1738 drm_intel_add_validate_buffer2(target_bo, need_fence);
1744 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1748 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1749 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1750 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1752 /* Update the buffer offset */
1753 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1754 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1755 bo_gem->gem_handle, bo_gem->name, bo->offset,
1756 (unsigned long long)bufmgr_gem->exec_objects[i].
1758 bo->offset = bufmgr_gem->exec_objects[i].offset;
1764 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1768 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1769 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1770 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1772 /* Update the buffer offset */
1773 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1774 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1775 bo_gem->gem_handle, bo_gem->name, bo->offset,
1776 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1777 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1783 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1785 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1789 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1791 fwrite(data, 1, size, bufmgr_gem->aub_file);
1795 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1797 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1798 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1802 data = malloc(bo->size);
1803 drm_intel_bo_get_subdata(bo, offset, size, data);
1805 /* Easy mode: write out bo with no relocations */
1806 if (!bo_gem->reloc_count) {
1807 aub_out_data(bufmgr_gem, data, size);
1812 /* Otherwise, handle the relocations while writing. */
1813 for (i = 0; i < size / 4; i++) {
1815 for (r = 0; r < bo_gem->reloc_count; r++) {
1816 struct drm_i915_gem_relocation_entry *reloc;
1817 drm_intel_reloc_target *info;
1819 reloc = &bo_gem->relocs[r];
1820 info = &bo_gem->reloc_target_info[r];
1822 if (reloc->offset == offset + i * 4) {
1823 drm_intel_bo_gem *target_gem;
1826 target_gem = (drm_intel_bo_gem *)info->bo;
1829 val += target_gem->aub_offset;
1831 aub_out(bufmgr_gem, val);
1836 if (r == bo_gem->reloc_count) {
1837 /* no relocation, just the data */
1838 aub_out(bufmgr_gem, data[i]);
1846 aub_bo_get_address(drm_intel_bo *bo)
1848 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1849 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1851 /* Give the object a graphics address in the AUB file. We
1852 * don't just use the GEM object address because we do AUB
1853 * dumping before execution -- we want to successfully log
1854 * when the hardware might hang, and we might even want to aub
1855 * capture for a driver trying to execute on a different
1856 * generation of hardware by disabling the actual kernel exec
1859 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1860 bufmgr_gem->aub_offset += bo->size;
1861 /* XXX: Handle aperture overflow. */
1862 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1866 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1867 uint32_t offset, uint32_t size)
1869 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1870 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1873 CMD_AUB_TRACE_HEADER_BLOCK |
1876 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1877 aub_out(bufmgr_gem, subtype);
1878 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1879 aub_out(bufmgr_gem, size);
1880 aub_write_bo_data(bo, offset, size);
1884 aub_write_bo(drm_intel_bo *bo)
1886 uint32_t block_size;
1889 aub_bo_get_address(bo);
1891 /* Break up large objects into multiple writes. Otherwise a
1892 * 128kb VBO would overflow the 16 bits of size field in the
1893 * packet header and everything goes badly after that.
1895 for (offset = 0; offset < bo->size; offset += block_size) {
1896 block_size = bo->size - offset;
1898 if (block_size > 8 * 4096)
1899 block_size = 8 * 4096;
1901 aub_write_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1902 offset, block_size);
 * Make a ring buffer on the fly and dump it
1910 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
1911 uint32_t batch_buffer, int ring_flag)
1913 uint32_t ringbuffer[4096];
1914 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
1917 if (ring_flag == I915_EXEC_BSD)
1918 ring = AUB_TRACE_TYPE_RING_PRB1;
1920 /* Make a ring buffer to execute our batchbuffer. */
1921 memset(ringbuffer, 0, sizeof(ringbuffer));
1922 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
1923 ringbuffer[ring_count++] = batch_buffer;
1925 /* Write out the ring. This appears to trigger execution of
1926 * the ring in the simulator.
1929 CMD_AUB_TRACE_HEADER_BLOCK |
1932 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
1933 aub_out(bufmgr_gem, 0); /* general/surface subtype */
1934 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
1935 aub_out(bufmgr_gem, ring_count * 4);
1937 /* FIXME: Need some flush operations here? */
1938 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
1940 /* Update offset pointer */
1941 bufmgr_gem->aub_offset += 4096;
1945 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
1946 int x1, int y1, int width, int height,
1947 enum aub_dump_bmp_format format,
1948 int pitch, int offset)
1950 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1951 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1955 case AUB_DUMP_BMP_FORMAT_8BIT:
1958 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
1961 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
1962 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
1966 printf("Unknown AUB dump format %d\n", format);
1970 if (!bufmgr_gem->aub_file)
1973 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
1974 aub_out(bufmgr_gem, (y1 << 16) | x1);
1979 aub_out(bufmgr_gem, (height << 16) | width);
1980 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1982 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
1983 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
1987 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
1989 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1990 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1993 if (!bufmgr_gem->aub_file)
1996 /* Write out all but the batchbuffer to AUB memory */
1997 for (i = 0; i < bufmgr_gem->exec_count - 1; i++) {
1998 if (bufmgr_gem->exec_bos[i] != bo)
1999 aub_write_bo(bufmgr_gem->exec_bos[i]);
2002 aub_bo_get_address(bo);
2004 /* Dump the batchbuffer. */
2005 aub_write_trace_block(bo, AUB_TRACE_TYPE_BATCH, 0,
2007 aub_write_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
2008 used, bo->size - used);
2010 /* Dump ring buffer */
2011 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2013 fflush(bufmgr_gem->aub_file);
2016 * One frame has been dumped. So reset the aub_offset for the next frame.
2018 * FIXME: Can we do this?
2020 bufmgr_gem->aub_offset = 0x10000;
2024 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2025 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2027 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2028 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2029 struct drm_i915_gem_execbuffer execbuf;
2032 if (bo_gem->has_error)
2035 pthread_mutex_lock(&bufmgr_gem->lock);
2036 /* Update indices and set up the validate list. */
2037 drm_intel_gem_bo_process_reloc(bo);
2039 /* Add the batch buffer to the validation list. There are no
2040 * relocations pointing to it.
2042 drm_intel_add_validate_buffer(bo);
2045 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2046 execbuf.buffer_count = bufmgr_gem->exec_count;
2047 execbuf.batch_start_offset = 0;
2048 execbuf.batch_len = used;
2049 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2050 execbuf.num_cliprects = num_cliprects;
2054 ret = drmIoctl(bufmgr_gem->fd,
2055 DRM_IOCTL_I915_GEM_EXECBUFFER,
2059 if (errno == ENOSPC) {
2060 DBG("Execbuffer fails to pin. "
2061 "Estimate: %u. Actual: %u. Available: %u\n",
2062 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2065 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2068 (unsigned int)bufmgr_gem->gtt_size);
2071 drm_intel_update_buffer_offsets(bufmgr_gem);
2073 if (bufmgr_gem->bufmgr.debug)
2074 drm_intel_gem_dump_validation_list(bufmgr_gem);
2076 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2077 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2078 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2080 /* Disconnect the buffer from the validate list */
2081 bo_gem->validate_index = -1;
2082 bufmgr_gem->exec_bos[i] = NULL;
2084 bufmgr_gem->exec_count = 0;
2085 pthread_mutex_unlock(&bufmgr_gem->lock);
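/*
 * Usage sketch (illustrative only): user code reaches the function
 * above through drm_intel_bo_exec(), passing the number of bytes of
 * the batch actually filled in ("batch_bo" and "used_bytes" are
 * hypothetical caller-side names):
 *
 *	int ret = drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
 *	if (ret != 0)
 *		fprintf(stderr, "execbuffer failed: %d\n", ret);
 */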
2091 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2092 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2095 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2096 struct drm_i915_gem_execbuffer2 execbuf;
2100 switch (flags & 0x7) {
2104 if (!bufmgr_gem->has_blt)
2108 if (!bufmgr_gem->has_bsd)
2111 case I915_EXEC_RENDER:
2112 case I915_EXEC_DEFAULT:
2116 pthread_mutex_lock(&bufmgr_gem->lock);
2117 /* Update indices and set up the validate list. */
2118 drm_intel_gem_bo_process_reloc2(bo);
2120 /* Add the batch buffer to the validation list. There are no relocations
2123 drm_intel_add_validate_buffer2(bo, 0);
2126 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2127 execbuf.buffer_count = bufmgr_gem->exec_count;
2128 execbuf.batch_start_offset = 0;
2129 execbuf.batch_len = used;
2130 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2131 execbuf.num_cliprects = num_cliprects;
2134 execbuf.flags = flags;
2138 aub_exec(bo, flags, used);
2140 if (bufmgr_gem->no_exec)
2141 goto skip_execution;
2143 ret = drmIoctl(bufmgr_gem->fd,
2144 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2148 if (ret == -ENOSPC) {
2149 DBG("Execbuffer fails to pin. "
2150 "Estimate: %u. Actual: %u. Available: %u\n",
2151 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2152 bufmgr_gem->exec_count),
2153 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2154 bufmgr_gem->exec_count),
2155 (unsigned int) bufmgr_gem->gtt_size);
2158 drm_intel_update_buffer_offsets2(bufmgr_gem);
2161 if (bufmgr_gem->bufmgr.debug)
2162 drm_intel_gem_dump_validation_list(bufmgr_gem);
2164 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2165 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2166 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2168 /* Disconnect the buffer from the validate list */
2169 bo_gem->validate_index = -1;
2170 bufmgr_gem->exec_bos[i] = NULL;
2172 bufmgr_gem->exec_count = 0;
2173 pthread_mutex_unlock(&bufmgr_gem->lock);
2179 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2180 drm_clip_rect_t *cliprects, int num_cliprects,
2183 return drm_intel_gem_bo_mrb_exec2(bo, used,
2184 cliprects, num_cliprects, DR4,
2189 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2191 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2192 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2193 struct drm_i915_gem_pin pin;
2197 pin.handle = bo_gem->gem_handle;
2198 pin.alignment = alignment;
2200 ret = drmIoctl(bufmgr_gem->fd,
2201 DRM_IOCTL_I915_GEM_PIN,
2206 bo->offset = pin.offset;
2211 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2213 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2214 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2215 struct drm_i915_gem_unpin unpin;
2219 unpin.handle = bo_gem->gem_handle;
2221 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2229 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2230 uint32_t tiling_mode,
2233 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2234 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2235 struct drm_i915_gem_set_tiling set_tiling;
2238 if (bo_gem->global_name == 0 &&
2239 tiling_mode == bo_gem->tiling_mode &&
2240 stride == bo_gem->stride)
2243 memset(&set_tiling, 0, sizeof(set_tiling));
2245 /* set_tiling is slightly broken and overwrites the
2246 * input on the error path, so we have to open code
2249 set_tiling.handle = bo_gem->gem_handle;
2250 set_tiling.tiling_mode = tiling_mode;
2251 set_tiling.stride = stride;
2253 ret = ioctl(bufmgr_gem->fd,
2254 DRM_IOCTL_I915_GEM_SET_TILING,
2256 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2260 bo_gem->tiling_mode = set_tiling.tiling_mode;
2261 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2262 bo_gem->stride = set_tiling.stride;
2267 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2270 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2271 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2274 /* Linear buffers have no stride. By ensuring that we only ever use
2275 * stride 0 with linear buffers, we simplify our code.
2277 if (*tiling_mode == I915_TILING_NONE)
2280 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2282 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2284 *tiling_mode = bo_gem->tiling_mode;
2289 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2290 uint32_t * swizzle_mode)
2292 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2294 *tiling_mode = bo_gem->tiling_mode;
2295 *swizzle_mode = bo_gem->swizzle_mode;
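/*
 * Usage sketch (illustrative only): callers query the tiling and bit-6
 * swizzling the kernel actually applied rather than assuming their
 * request was honoured ("use_linear_upload_path" is a hypothetical
 * helper):
 *
 *	uint32_t tiling, swizzle;
 *	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 *	if (tiling == I915_TILING_NONE)
 *		use_linear_upload_path(bo);
 */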
2300 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2302 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2303 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2306 if (!bo_gem->global_name) {
2307 struct drm_gem_flink flink;
2310 flink.handle = bo_gem->gem_handle;
2312 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2316 bo_gem->global_name = flink.name;
2317 bo_gem->reusable = false;
2319 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2322 *name = bo_gem->global_name;
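/*
 * Editor's illustrative sketch (not part of the original file): sharing a
 * buffer between processes by its global (flink) name.  The exporter hands
 * the name to another process, which opens it via
 * drm_intel_bo_gem_create_from_name().  Error handling is elided.
 */
static uint32_t
example_export_flink_name(drm_intel_bo *bo)
{
	uint32_t name = 0;

	/* Flinked buffers are marked non-reusable, as in the code above. */
	drm_intel_bo_flink(bo, &name);
	return name;
}

static drm_intel_bo *
example_import_flink_name(drm_intel_bufmgr *bufmgr, uint32_t name)
{
	return drm_intel_bo_gem_create_from_name(bufmgr, "imported bo", name);
}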
2327 * Enables unlimited caching of buffer objects for reuse.
2329 * This is potentially very memory expensive, as the cache at each bucket
2330 * size is only bounded by how many buffers of that size we've managed to have
2331 * in flight at once.
2334 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2336 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2338 bufmgr_gem->bo_reuse = true;
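/*
 * Editor's note: clients normally opt in immediately after creating the
 * buffer manager, e.g.
 *
 *	bufmgr = drm_intel_bufmgr_gem_init(fd, batch_size);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */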
2342 * Enable use of fenced reloc type.
2344 * New code should enable this to avoid unnecessary fence register
2345 * allocation. If this option is not enabled, all relocs will have a fence
2346 * register allocated.
2349 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2351 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2353 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2354 bufmgr_gem->fenced_relocs = true;
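/*
 * Editor's note: the option is only honoured when execbuffer2 is in use
 * (bo_exec == drm_intel_gem_bo_exec2 above); on older kernels the check
 * fails and relocations keep requesting fence registers unconditionally.
 */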
2358 * Return the additional aperture space required by the tree of buffer objects rooted at bo.
2362 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2364 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2368 if (bo == NULL || bo_gem->included_in_check_aperture)
2372 bo_gem->included_in_check_aperture = true;
2374 for (i = 0; i < bo_gem->reloc_count; i++)
2376 drm_intel_gem_bo_get_aperture_space(bo_gem->
2377 reloc_target_info[i].bo);
2383 * Count the number of buffers in this list that need a fence reg
2385 * If the count is greater than the number of available regs, we'll have
2386 * to ask the caller to resubmit a batch with fewer tiled buffers.
2388 * This function over-counts if the same buffer is used multiple times.
2391 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2394 unsigned int total = 0;
2396 for (i = 0; i < count; i++) {
2397 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2402 total += bo_gem->reloc_tree_fences;
2408 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2409 * for the next drm_intel_bufmgr_check_aperture_space() call.
2412 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2414 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2417 if (bo == NULL || !bo_gem->included_in_check_aperture)
2420 bo_gem->included_in_check_aperture = false;
2422 for (i = 0; i < bo_gem->reloc_count; i++)
2423 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2424 reloc_target_info[i].bo);
2428 * Return a conservative estimate for the amount of aperture required
2429 * for a collection of buffers. This may double-count some buffers.
2432 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2435 unsigned int total = 0;
2437 for (i = 0; i < count; i++) {
2438 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2440 total += bo_gem->reloc_tree_size;
2446 * Return the amount of aperture needed for a collection of buffers.
2447 * This avoids double counting any buffers, at the cost of looking
2448 * at every buffer in the set.
2451 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2454 unsigned int total = 0;
2456 for (i = 0; i < count; i++) {
2457 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2458 /* For the first buffer object in the array, we get an
2459 * accurate count back for its reloc_tree size (since nothing
2460 * had been flagged as being counted yet). We can save that
2461 * value out as a more conservative reloc_tree_size that
2462 * avoids double-counting target buffers. Since the first
2463 * buffer happens to usually be the batch buffer in our
2464 * callers, this can pull us back from doing the tree
2465 * walk on every new batch emit.
2468 drm_intel_bo_gem *bo_gem =
2469 (drm_intel_bo_gem *) bo_array[i];
2470 bo_gem->reloc_tree_size = total;
2474 for (i = 0; i < count; i++)
2475 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2480 * Return -1 if the batchbuffer should be flushed before attempting to
2481 * emit rendering referencing the buffers pointed to by bo_array.
2483 * This is required because if we try to emit a batchbuffer with relocations
2484 * to a tree of buffers that won't simultaneously fit in the aperture,
2485 * the rendering will return an error at a point where the software is not
2486 * prepared to recover from it.
2488 * However, we also want to emit the batchbuffer significantly before we reach
2489 * the limit, as a series of batchbuffers each of which references buffers
2490 * covering almost all of the aperture means that at each emit we end up
2491 * waiting to evict a buffer from the last rendering, and we get synchronous
2492 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2493 * get better parallelism.
2496 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2498 drm_intel_bufmgr_gem *bufmgr_gem =
2499 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2500 unsigned int total = 0;
2501 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2504 /* Check for fence reg constraints if necessary */
2505 if (bufmgr_gem->available_fences) {
2506 total_fences = drm_intel_gem_total_fences(bo_array, count);
2507 if (total_fences > bufmgr_gem->available_fences)
2511 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2513 if (total > threshold)
2514 total = drm_intel_gem_compute_batch_space(bo_array, count);
2516 if (total > threshold) {
2517 DBG("check_space: overflowed available aperture, "
2519 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2522 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2523 (int)bufmgr_gem->gtt_size / 1024);
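/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * caller pattern around this check.  Before emitting a draw that references
 * new buffers, the client asks whether everything still fits and flushes
 * the batch first if not.  flush_batch() and emit_draw() are hypothetical
 * caller functions.
 */
static void
example_emit_with_aperture_check(drm_intel_bo *batch_bo, drm_intel_bo *target_bo)
{
	drm_intel_bo *check[2] = { batch_bo, target_bo };

	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
		/* Would overflow the aperture: submit what we have first. */
		/* flush_batch(); */
	}
	/* emit_draw(batch_bo, target_bo); */
}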
2529 * Disable buffer reuse for objects which are shared with the kernel
2530 * as scanout buffers
2533 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2535 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2537 bo_gem->reusable = false;
2542 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2544 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2546 return bo_gem->reusable;
2550 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2552 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2555 for (i = 0; i < bo_gem->reloc_count; i++) {
2556 if (bo_gem->reloc_target_info[i].bo == target_bo)
2558 if (bo == bo_gem->reloc_target_info[i].bo)
2560 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2568 /** Return true if target_bo is referenced by bo's relocation tree. */
2570 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2572 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2574 if (bo == NULL || target_bo == NULL)
2576 if (target_bo_gem->used_as_reloc_target)
2577 return _drm_intel_gem_bo_references(bo, target_bo);
2582 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2584 unsigned int i = bufmgr_gem->num_buckets;
2586 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2588 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2589 bufmgr_gem->cache_bucket[i].size = size;
2590 bufmgr_gem->num_buckets++;
2594 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2596 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2598 /* OK, so power of two buckets was too wasteful of memory.
2599 * Give 3 other sizes between each power of two, to hopefully
2600 * cover things accurately enough. (The alternative is
2601 * probably to just go for exact matching of sizes, and assume
2602 * that for things like composited window resize the tiled
2603 * width/height alignment and rounding of sizes to pages will
2604 * get us useful cache hit rates anyway)
2606 add_bucket(bufmgr_gem, 4096);
2607 add_bucket(bufmgr_gem, 4096 * 2);
2608 add_bucket(bufmgr_gem, 4096 * 3);
2610 /* Initialize the linked lists for BO reuse cache. */
2611 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2612 add_bucket(bufmgr_gem, size);
2614 add_bucket(bufmgr_gem, size + size * 1 / 4);
2615 add_bucket(bufmgr_gem, size + size * 2 / 4);
2616 add_bucket(bufmgr_gem, size + size * 3 / 4);
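/*
 * Editor's note: the resulting bucket sizes are 4KB, 8KB, 12KB, then
 * 16/20/24/28KB, 32/40/48/56KB and so on (each power of two plus its
 * 1/4, 2/4 and 3/4 steps) up to the 64MB cache_max_size above.
 */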
2621 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2623 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2625 bufmgr_gem->vma_max = limit;
2627 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2631 * Get the PCI ID for the device. This can be overridden by setting the
2632 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2635 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2637 char *devid_override;
2640 drm_i915_getparam_t gp;
2642 if (geteuid() == getuid()) {
2643 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2644 if (devid_override) {
2645 bufmgr_gem->no_exec = true;
2646 return strtod(devid_override, NULL);
2651 gp.param = I915_PARAM_CHIPSET_ID;
2653 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2655 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2656 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
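/*
 * Editor's note: the override is intended for running clients against a
 * device you do not have, e.g.
 *
 *	INTEL_DEVID_OVERRIDE=0x0126 ./client
 *
 * (the ID value here is only an example).  As above, it also sets no_exec
 * so batches are never actually submitted to the GPU.
 */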
2662 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2664 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2666 return bufmgr_gem->pci_device;
2670 * Sets up AUB dumping.
2672 * This is a trace file format that can be used with the simulator.
2673 * Packets are emitted in a format somewhat like GPU command packets.
2674 * You can set up a GTT and upload your objects into the referenced
2675 * space, then send off batchbuffers and get BMPs out the other end.
2678 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2680 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2681 int entry = 0x200003;
2683 int gtt_size = 0x10000;
2686 if (bufmgr_gem->aub_file) {
2687 fclose(bufmgr_gem->aub_file);
2688 bufmgr_gem->aub_file = NULL;
2692 if (geteuid() != getuid())
2695 bufmgr_gem->aub_file = fopen("intel.aub", "w+");
2696 if (!bufmgr_gem->aub_file)
2699 /* Start allocating objects from just after the GTT. */
2700 bufmgr_gem->aub_offset = gtt_size;
2702 /* Start with a (required) version packet. */
2703 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
2705 aub_out(bufmgr_gem, (4 << AUB_HEADER_MAJOR_SHIFT) |
2706 (0 << AUB_HEADER_MINOR_SHIFT));
2707 for (i = 0; i < 8; i++) {
2708 aub_out(bufmgr_gem, 0); /* app name */
2710 aub_out(bufmgr_gem, 0); /* timestamp */
2711 aub_out(bufmgr_gem, 0); /* timestamp */
2712 aub_out(bufmgr_gem, 0); /* comment len */
2714 /* Set up the GTT. The max we can handle is 256M */
2715 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
2716 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
2717 aub_out(bufmgr_gem, 0); /* subtype */
2718 aub_out(bufmgr_gem, 0); /* offset */
2719 aub_out(bufmgr_gem, gtt_size); /* size */
2720 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
2721 aub_out(bufmgr_gem, entry);
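/*
 * Editor's note: a client enables dumping with
 *
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *
 * after which buffer contents and batches are also written to "intel.aub"
 * in the current directory; passing 0 closes the file again.
 */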
2726 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
2727 * and manage buffer objects.
2729 * \param fd File descriptor of the opened DRM device.
2732 drm_intel_bufmgr_gem_init(int fd, int batch_size)
2734 drm_intel_bufmgr_gem *bufmgr_gem;
2735 struct drm_i915_gem_get_aperture aperture;
2736 drm_i915_getparam_t gp;
2740 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
2741 if (bufmgr_gem == NULL)
2744 bufmgr_gem->fd = fd;
2746 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
2751 ret = drmIoctl(bufmgr_gem->fd,
2752 DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
2756 bufmgr_gem->gtt_size = aperture.aper_available_size;
2758 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
2760 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2761 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2762 "May lead to reduced performance or incorrect "
2764 (int)bufmgr_gem->gtt_size / 1024);
2767 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
2769 if (IS_GEN2(bufmgr_gem->pci_device))
2770 bufmgr_gem->gen = 2;
2771 else if (IS_GEN3(bufmgr_gem->pci_device))
2772 bufmgr_gem->gen = 3;
2773 else if (IS_GEN4(bufmgr_gem->pci_device))
2774 bufmgr_gem->gen = 4;
2775 else if (IS_GEN5(bufmgr_gem->pci_device))
2776 bufmgr_gem->gen = 5;
2777 else if (IS_GEN6(bufmgr_gem->pci_device))
2778 bufmgr_gem->gen = 6;
2779 else if (IS_GEN7(bufmgr_gem->pci_device))
2780 bufmgr_gem->gen = 7;
2784 if (IS_GEN3(bufmgr_gem->pci_device) &&
2785 bufmgr_gem->gtt_size > 256*1024*1024) {
2786 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
2787 * be used for tiled blits. To simplify the accounting, just
2788 * subtract the unmappable part (fixed to 256MB on all known
2789 * gen3 devices) if the kernel advertises it. */
2790 bufmgr_gem->gtt_size -= 256*1024*1024;
2795 gp.param = I915_PARAM_HAS_EXECBUF2;
2796 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2800 gp.param = I915_PARAM_HAS_BSD;
2801 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2802 bufmgr_gem->has_bsd = ret == 0;
2804 gp.param = I915_PARAM_HAS_BLT;
2805 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2806 bufmgr_gem->has_blt = ret == 0;
2808 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
2809 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2810 bufmgr_gem->has_relaxed_fencing = ret == 0;
2812 gp.param = I915_PARAM_HAS_LLC;
2813 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2815 /* Kernel does not support the HAS_LLC query; fall back to GPU
2816 * generation detection and assume that we have LLC on GEN6/7
2818 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
2819 IS_GEN7(bufmgr_gem->pci_device));
2821 bufmgr_gem->has_llc = ret == 0;
2823 if (bufmgr_gem->gen < 4) {
2824 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2825 gp.value = &bufmgr_gem->available_fences;
2826 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2828 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2830 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2832 bufmgr_gem->available_fences = 0;
2834 /* XXX The kernel reports the total number of fences,
2835 * including any that may be pinned.
2837 * We presume that there will be at least one pinned
2838 * fence for the scanout buffer, but there may be more
2839 * than one scanout and the user may be manually
2840 * pinning buffers. Let's move to execbuffer2 and
2841 * thereby forget the insanity of using fences...
2843 bufmgr_gem->available_fences -= 2;
2844 if (bufmgr_gem->available_fences < 0)
2845 bufmgr_gem->available_fences = 0;
2849 /* Let's go with one relocation per every 2 dwords (but round down a bit
2850 * since a power of two will mean an extra page allocation for the reloc buffer).
2853 * Every 4 was too few for the blender benchmark.
2855 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
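/*
 * Editor's note: for a common 16KB batch this evaluates to
 * 16384 / 4 / 2 - 2 = 2046 relocation entries.
 */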
2857 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2858 bufmgr_gem->bufmgr.bo_alloc_for_render =
2859 drm_intel_gem_bo_alloc_for_render;
2860 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2861 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2862 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2863 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2864 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2865 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2866 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2867 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2868 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2869 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2870 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2871 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2872 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2873 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2874 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2875 /* Use the new one if available */
2877 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2878 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
2880 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2881 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2882 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2883 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2884 bufmgr_gem->bufmgr.debug = 0;
2885 bufmgr_gem->bufmgr.check_aperture_space =
2886 drm_intel_gem_check_aperture_space;
2887 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2888 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
2889 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2890 drm_intel_gem_get_pipe_from_crtc_id;
2891 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2893 DRMINITLISTHEAD(&bufmgr_gem->named);
2894 init_cache_buckets(bufmgr_gem);
2896 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
2897 bufmgr_gem->vma_max = -1; /* unlimited by default */
2899 return &bufmgr_gem->bufmgr;
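/*
 * Editor's illustrative sketch (not part of the original file): typical
 * bring-up of the GEM buffer manager by a client.  "fd" is assumed to be
 * an already-open DRM device node; the 4KB batch size and buffer name are
 * arbitrary example values.
 */
static drm_intel_bufmgr *
example_bufmgr_setup(int fd)
{
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo *bo;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (bufmgr == NULL)
		return NULL;

	/* Opt in to the BO cache and to on-demand fence registers. */
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);

	/* Allocate, CPU-map, clear and release a page-sized buffer. */
	bo = drm_intel_bo_alloc(bufmgr, "example bo", 4096, 4096);
	if (bo != NULL) {
		if (drm_intel_bo_map(bo, 1) == 0) {
			memset(bo->virtual, 0, 4096);
			drm_intel_bo_unmap(bo);
		}
		drm_intel_bo_unreference(bo);
	}
	return bufmgr;
}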