/**************************************************************************
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 **************************************************************************/
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 * Eric Anholt <eric@anholt.net>
 * Dave Airlie <airlied@linux.ie>
#include <xf86atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#define ETIME ETIMEDOUT
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "intel_aub.h"
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
#define DBG(...) do { \
if (bufmgr_gem->bufmgr.debug) \
fprintf(stderr, __VA_ARGS__); \
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
struct drm_intel_gem_bo_bucket {
typedef struct _drm_intel_bufmgr_gem {
drm_intel_bufmgr bufmgr;
pthread_mutex_t lock;
struct drm_i915_gem_exec_object *exec_objects;
struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
/** Array of lists of cached gem objects of power-of-two sizes */
struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
drmMMListHead managers;
drmMMListHead vma_cache;
int vma_count, vma_open, vma_max;
int available_fences;
unsigned int has_bsd : 1;
unsigned int has_blt : 1;
unsigned int has_relaxed_fencing : 1;
unsigned int has_llc : 1;
unsigned int has_wait_timeout : 1;
unsigned int bo_reuse : 1;
unsigned int no_exec : 1;
unsigned int has_vebox : 1;
} drm_intel_bufmgr_gem;
#define DRM_INTEL_RELOC_FENCE (1<<0)
typedef struct _drm_intel_reloc_target_info {
} drm_intel_reloc_target;
struct _drm_intel_bo_gem {
* Kernel-assigned global name for this object
* List contains both flink named and prime fd'd objects
unsigned int global_name;
drmMMListHead name_list;
* Index of the buffer within the validation list while preparing a
* batchbuffer execution.
* Current tiling mode
uint32_t tiling_mode;
uint32_t swizzle_mode;
unsigned long stride;
/** Array passed to the DRM containing relocation information. */
struct drm_i915_gem_relocation_entry *relocs;
* Array of info structs corresponding to relocs[i].target_handle etc
drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
/** Mapped address for the buffer, saved across map/unmap cycles */
/** GTT virtual address for the buffer, saved across map/unmap cycles */
* Virtual address of the buffer allocated by user, used for userptr
drmMMListHead vma_list;
* Boolean of whether this BO and its children have been included in
* the current drm_intel_bufmgr_check_aperture_space() total.
bool included_in_check_aperture;
* Boolean of whether this buffer has been used as a relocation
* target and had its size accounted for, and thus can't have any
* further relocations added to it.
bool used_as_reloc_target;
* Boolean of whether we have encountered an error whilst building the relocation tree.
* Boolean of whether this buffer can be re-used
* Boolean of whether the GPU is definitely not accessing the buffer.
* This is only valid when reusable, since non-reusable
* buffers are those that have been shared with other
* processes, so we don't know their state.
* Boolean of whether this buffer was allocated with userptr
* Size in bytes of this buffer and its relocation descendants.
* Used to avoid costly tree walking in
* drm_intel_bufmgr_check_aperture in the common case.
* Number of potential fence registers required by this buffer and its
int reloc_tree_fences;
/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
bool mapped_cpu_write;
drm_intel_aub_annotation *aub_annotations;
unsigned aub_annotation_count;
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
uint32_t tiling_mode,
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
static void drm_intel_gem_bo_free(drm_intel_bo *bo);
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
uint32_t *tiling_mode)
unsigned long min_size, max_size;
if (*tiling_mode == I915_TILING_NONE)
/* 965+ just need multiples of page size for tiling */
if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(size, 4096);
/* Older chips need powers of two, of at least 512k or 1M */
if (bufmgr_gem->gen == 3) {
min_size = 1024*1024;
max_size = 128*1024*1024;
max_size = 64*1024*1024;
if (size > max_size) {
*tiling_mode = I915_TILING_NONE;
/* Do we need to allocate every page for the fence? */
if (bufmgr_gem->has_relaxed_fencing)
return ROUND_UP_TO(size, 4096);
for (i = min_size; i < size; i <<= 1)
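/* Worked example (a sketch of the rules above, not from the original
 * source): on gen3 without relaxed fencing, a 1.5 MiB tiled request
 * is rounded up by the loop above to 2 MiB, since pre-965 fence
 * regions must be powers of two of at least 1 MiB on that generation.
 */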
* Round a given pitch up to the minimum required for X tiling on a
* given chip. We use 512 as the minimum to allow for a later tiling
* change.
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long pitch, uint32_t *tiling_mode)
unsigned long tile_width;
/* If untiled, then just align it so that we can do rendering
* to it with the 3D engine.
if (*tiling_mode == I915_TILING_NONE)
return ALIGN(pitch, 64);
if (*tiling_mode == I915_TILING_X
|| (IS_915(bufmgr_gem->pci_device)
&& *tiling_mode == I915_TILING_Y))
/* 965 is flexible */
if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(pitch, tile_width);
/* The older hardware has a maximum pitch of 8192 with tiled
* surfaces, so fall back to untiled if it's too large.
*tiling_mode = I915_TILING_NONE;
return ALIGN(pitch, 64);
/* Pre-965 needs power of two tile width */
for (i = tile_width; i < pitch; i <<= 1)
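/* Worked example (a sketch, assuming the 512-byte minimum tile width
 * noted above): an untiled pitch of 100 bytes aligns up to 128
 * (multiple of 64); on pre-965 hardware an X-tiled pitch of 640 is
 * rounded by the loop above to the next power of two, 1024. */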
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
if (bucket->size >= size) {
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->relocs == NULL) {
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
for (j = 0; j < bo_gem->reloc_count; j++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
DBG("%2d: %d (%s)@0x%08llx -> "
"%d (%s)@0x%08lx + 0x%08x\n",
bo_gem->gem_handle, bo_gem->name,
(unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle,
bo_gem->relocs[j].delta);
drm_intel_gem_bo_reference(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
atomic_inc(&bo_gem->refcount);
* Adds the given buffer to the list of buffers to be validated (moved into the
* appropriate memory type) with the next batch submission.
* If a buffer is validated multiple times in a batch submission, it ends up
* with the intersection of the memory type flags and the union of the
* access flags.
drm_intel_add_validate_buffer(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->validate_index != -1)
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
bufmgr_gem->exec_objects =
realloc(bufmgr_gem->exec_objects,
sizeof(*bufmgr_gem->exec_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec_count++;
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
if (bo_gem->validate_index != -1) {
bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
EXEC_OBJECT_NEEDS_FENCE;
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
bufmgr_gem->exec2_objects =
realloc(bufmgr_gem->exec2_objects,
sizeof(*bufmgr_gem->exec2_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
bufmgr_gem->exec2_objects[index].alignment = 0;
bufmgr_gem->exec2_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec2_objects[index].flags = 0;
bufmgr_gem->exec2_objects[index].rsvd1 = 0;
bufmgr_gem->exec2_objects[index].rsvd2 = 0;
bufmgr_gem->exec2_objects[index].flags |=
EXEC_OBJECT_NEEDS_FENCE;
bufmgr_gem->exec_count++;
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
assert(!bo_gem->used_as_reloc_target);
/* The older chipsets are far less flexible in terms of tiling,
* and require tiled buffers to be size-aligned in the aperture.
* This means that in the worst possible case we will need a hole
* twice as large as the object in order for it to fit into the
* aperture. Optimal packing is for wimps.
size = bo_gem->bo.size;
if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
if (bufmgr_gem->has_relaxed_fencing) {
if (bufmgr_gem->gen == 3)
min_size = 1024*1024;
while (min_size < size)
/* Account for worst-case alignment. */
bo_gem->reloc_tree_size = size;
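/* Illustrative arithmetic (a sketch; the doubling for the worst-case
 * hole follows the comment above): on gen3 with relaxed fencing, a
 * 1.5 MiB tiled BO rounds up to a 2 MiB fence region, and the
 * worst-case hole doubles that to 4 MiB charged against the aperture.
 */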
drm_intel_setup_reloc_list(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
unsigned int max_relocs = bufmgr_gem->max_relocs;
if (bo->size / 4 < max_relocs)
max_relocs = bo->size / 4;
bo_gem->relocs = malloc(max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
bo_gem->reloc_target_info = malloc(max_relocs *
sizeof(drm_intel_reloc_target));
if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
bo_gem->has_error = true;
free(bo_gem->relocs);
bo_gem->relocs = NULL;
free(bo_gem->reloc_target_info);
bo_gem->reloc_target_info = NULL;
drm_intel_gem_bo_busy(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_busy busy;
if (bo_gem->reusable && bo_gem->idle)
busy.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
bo_gem->idle = !busy.busy;
return (ret == 0 && busy.busy);
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem, int state)
struct drm_i915_gem_madvise madv;
madv.handle = bo_gem->gem_handle;
drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
return madv.retained;
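/* The reuse cache relies on this madvise handshake: cached BOs are
 * marked I915_MADV_DONTNEED so the kernel may reclaim their pages
 * under memory pressure, and are flipped back to I915_MADV_WILLNEED
 * before reuse; `retained` reports whether the pages survived. */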
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
return drm_intel_gem_bo_madvise_internal
((drm_intel_bufmgr_gem *) bo->bufmgr,
(drm_intel_bo_gem *) bo,
/* drop the oldest entries that have been purged by the kernel */
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
struct drm_intel_gem_bo_bucket *bucket)
while (!DRMLISTEMPTY(&bucket->head)) {
drm_intel_bo_gem *bo_gem;
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (drm_intel_gem_bo_madvise_internal
(bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
DRMLISTDEL(&bo_gem->head);
drm_intel_gem_bo_free(&bo_gem->bo);
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
uint32_t tiling_mode,
unsigned long stride)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
unsigned int page_size = getpagesize();
struct drm_intel_gem_bo_bucket *bucket;
bool alloc_from_cache;
unsigned long bo_size;
bool for_render = false;
if (flags & BO_ALLOC_FOR_RENDER)
/* Round the allocated size up to a power of two number of pages. */
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
/* If we don't have caching at this size, don't actually round the
if (bucket == NULL) {
if (bo_size < page_size)
bo_size = bucket->size;
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a buffer out of the cache if available */
alloc_from_cache = false;
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
* cache and in the aperture for us.
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
alloc_from_cache = true;
/* For non-render-target BOs (where we're probably
* going to map it first thing in order to fill it
* with data), check if the last BO in the cache is
* unbusy, and only reuse in that case. Otherwise,
* allocating a new buffer is probably faster than
* waiting for the GPU to finish.
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
alloc_from_cache = true;
DRMLISTDEL(&bo_gem->head);
if (alloc_from_cache) {
if (!drm_intel_gem_bo_madvise_internal
(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
drm_intel_gem_bo_free(&bo_gem->bo);
drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
drm_intel_gem_bo_free(&bo_gem->bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
bo_gem = calloc(1, sizeof(*bo_gem));
bo_gem->bo.size = bo_size;
create.size = bo_size;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_CREATE,
bo_gem->gem_handle = create.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
/* drm_intel_gem_bo_free calls DRMLISTDEL() on an uninitialized
list (vma_list), so initialize the list heads here first */
DRMINITLISTHEAD(&bo_gem->name_list);
DRMINITLISTHEAD(&bo_gem->vma_list);
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
drm_intel_gem_bo_free(&bo_gem->bo);
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = true;
bo_gem->aub_annotations = NULL;
bo_gem->aub_annotation_count = 0;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
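/* Caller-side sketch (public entry point from intel_bufmgr.h; the
 * buffer name and sizes are illustrative, not from the original):
 *
 *   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
 *   ...
 *   drm_intel_bo_unreference(bo);
 */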
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
unsigned int alignment)
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
I915_TILING_NONE, 0);
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
unsigned int alignment)
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
I915_TILING_NONE, 0);
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
unsigned long size, stride;
unsigned long aligned_y, height_alignment;
tiling = *tiling_mode;
/* If we're tiled, our allocations are in 8 or 32-row blocks,
* so failure to align our height means that we won't allocate
* enough pages.
* If we're untiled, we still have to align to 2 rows high
* because the data port accesses 2x2 blocks even if the
* bottom row isn't to be rendered, so failure to align means
* we could walk off the end of the GTT and fault. This is
* documented on 965, and may be the case on older chipsets
* too so we try to be careful.
height_alignment = 2;
if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
height_alignment = 16;
else if (tiling == I915_TILING_X
|| (IS_915(bufmgr_gem->pci_device)
&& tiling == I915_TILING_Y))
height_alignment = 8;
else if (tiling == I915_TILING_Y)
height_alignment = 32;
aligned_y = ALIGN(y, height_alignment);
stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
size = stride * aligned_y;
size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
} while (*tiling_mode != tiling);
if (tiling == I915_TILING_NONE)
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
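/* Caller-side sketch (illustrative values): allocate an X-tiled
 * 1024x768 surface at 4 bytes per pixel and read back the pitch and
 * tiling mode that the logic above settled on.
 *
 *   uint32_t tiling = I915_TILING_X;
 *   unsigned long pitch;
 *   drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "front",
 *                                               1024, 768, 4,
 *                                               &tiling, &pitch, 0);
 */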
static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
uint32_t tiling_mode,
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
struct drm_i915_gem_userptr userptr;
/* Tiling with userptr surfaces is not supported
* on all hardware so refuse it for the time being.
if (tiling_mode != I915_TILING_NONE)
bo_gem = calloc(1, sizeof(*bo_gem));
bo_gem->bo.size = size;
userptr.user_ptr = (__u64)((unsigned long)addr);
userptr.user_size = size;
userptr.flags = flags;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_USERPTR,
DBG("bo_create_userptr: "
"ioctl failed with user ptr %p size 0x%lx, "
"user flags 0x%lx\n", addr, size, flags);
bo_gem->gem_handle = userptr.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->is_userptr = true;
bo_gem->bo.virtual = addr;
/* Save the address provided by user */
bo_gem->user_virtual = addr;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
DRMINITLISTHEAD(&bo_gem->name_list);
DRMINITLISTHEAD(&bo_gem->vma_list);
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = false;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DBG("bo_create_userptr: "
"ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
addr, bo_gem->gem_handle, bo_gem->name,
size, stride, tiling_mode);
* Returns a drm_intel_bo wrapping the given buffer object handle.
* This can be used when one application needs to pass a buffer object
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
/* At the moment most applications only have a few named BOs.
* For instance, in a DRI client only the render buffers passed
* between X and the client are named. And since X returns the
* alternating names for the front/back buffer a linear search
* provides a sufficiently fast match.
pthread_mutex_lock(&bufmgr_gem->lock);
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->global_name == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
pthread_mutex_unlock(&bufmgr_gem->lock);
/* Now see if someone has used a prime handle to get this
* object from the kernel before by looking through the list
* again for a matching gem_handle
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == open_arg.handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
bo_gem = calloc(1, sizeof(*bo_gem));
pthread_mutex_unlock(&bufmgr_gem->lock);
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
bo_gem->bo.offset64 = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
bo_gem->bo.handle = open_arg.handle;
bo_gem->global_name = handle;
bo_gem->reusable = false;
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_TILING,
drm_intel_gem_bo_unreference(&bo_gem->bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
pthread_mutex_unlock(&bufmgr_gem->lock);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
drm_intel_gem_bo_free(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_gem_close close;
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
if (bo_gem->gtt_virtual) {
drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
/* Close this object */
close.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
free(bo_gem->aub_annotations);
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->mem_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
if (bo_gem->gtt_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
/** Frees all cached buffers significantly older than @time. */
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
if (bufmgr_gem->time == time)
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
while (!DRMLISTEMPTY(&bucket->head)) {
drm_intel_bo_gem *bo_gem;
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (time - bo_gem->free_time <= 1)
DRMLISTDEL(&bo_gem->head);
drm_intel_gem_bo_free(&bo_gem->bo);
bufmgr_gem->time = time;
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
if (bufmgr_gem->vma_max < 0)
/* We may need to evict a few entries in order to create new mmaps */
limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
while (bufmgr_gem->vma_count > limit) {
drm_intel_bo_gem *bo_gem;
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bufmgr_gem->vma_cache.next,
assert(bo_gem->map_count == 0);
DRMLISTDELINIT(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bo_gem->mem_virtual = NULL;
bufmgr_gem->vma_count--;
if (bo_gem->gtt_virtual) {
drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bo_gem->gtt_virtual = NULL;
bufmgr_gem->vma_count--;
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
bufmgr_gem->vma_open--;
DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count++;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count++;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
bufmgr_gem->vma_open++;
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count--;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count--;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
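/* Bookkeeping note: vma_open counts BOs with an active mapping, while
 * vma_count tracks cached mmaps (CPU and GTT counted separately). The
 * purge above keeps vma_count under vma_max, reserving headroom for
 * the mappings that are currently open. */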
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_intel_gem_bo_bucket *bucket;
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++) {
if (bo_gem->reloc_target_info[i].bo != bo) {
drm_intel_gem_bo_unreference_locked_timed(bo_gem->
reloc_target_info[i].bo,
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
/* release memory associated with this object */
if (bo_gem->reloc_target_info) {
free(bo_gem->reloc_target_info);
bo_gem->reloc_target_info = NULL;
if (bo_gem->relocs) {
free(bo_gem->relocs);
bo_gem->relocs = NULL;
/* Clear any left-over mappings */
if (bo_gem->map_count) {
DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
bo_gem->map_count = 0;
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
DRMLISTDEL(&bo_gem->name_list);
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
I915_MADV_DONTNEED)) {
bo_gem->free_time = time;
bo_gem->name = NULL;
bo_gem->validate_index = -1;
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
drm_intel_gem_bo_free(bo);
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_dec_and_test(&bo_gem->refcount))
drm_intel_gem_bo_unreference_final(bo, time);
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
pthread_mutex_unlock(&bufmgr_gem->lock);
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
if (bo_gem->is_userptr) {
/* Return the same user ptr */
bo->virtual = bo_gem->user_virtual;
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s), map_count=%d\n",
bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_MMAP,
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
bo_gem->name, strerror(errno));
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
pthread_mutex_unlock(&bufmgr_gem->lock);
VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
set_domain.write_domain = I915_GEM_DOMAIN_CPU;
set_domain.write_domain = 0;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
DBG("%s:%d: Error setting to CPU domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
bo_gem->mapped_cpu_write = true;
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
pthread_mutex_unlock(&bufmgr_gem->lock);
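/* Caller-side sketch (illustrative; `data` and `size` are hypothetical
 * caller variables): map for writing, fill, unmap.
 *
 *   if (drm_intel_bo_map(bo, 1) == 0) {
 *           memcpy(bo->virtual, data, size);
 *           drm_intel_bo_unmap(bo);
 *   }
 */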
map_gtt(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->is_userptr)
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
mmap_arg.handle = bo_gem->gem_handle;
/* Get the fake offset back... */
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_MMAP_GTT,
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
bo_gem->gem_handle, bo_gem->name,
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr_gem->fd,
if (bo_gem->gtt_virtual == MAP_FAILED) {
bo_gem->gtt_virtual = NULL;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
bo_gem->gem_handle, bo_gem->name,
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
bo->virtual = bo_gem->gtt_virtual;
DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->gtt_virtual);
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
pthread_mutex_lock(&bufmgr_gem->lock);
pthread_mutex_unlock(&bufmgr_gem->lock);
/* Now move it to the GTT domain so that the GPU and CPU
* caches are flushed and the GPU isn't actively using the
* The pagefault handler does this domain change for us when
* it has unbound the BO from the GTT, but it's up to us to
* tell it when we're about to use things if we had done
* rendering and it still happens to be bound to the GTT.
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = I915_GEM_DOMAIN_GTT;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
DBG("%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
pthread_mutex_unlock(&bufmgr_gem->lock);
* Performs a mapping of the buffer object like the normal GTT
* mapping, but avoids waiting for the GPU to be done reading from or
* rendering to the buffer.
* This is used in the implementation of GL_ARB_map_buffer_range: The
* user asks to create a buffer, then does a mapping, fills some
* space, runs a drawing command, then asks to map it again without
* synchronizing because it guarantees that it won't write over the
* data that the GPU is busy using (or, more specifically, that if it
* does write over the data, it acknowledges that rendering is
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* If the CPU cache isn't coherent with the GTT, then use a
* regular synchronized mapping. The problem is that we don't
* track where the buffer was last used on the CPU side in
* terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
* we would potentially corrupt the buffer even when the user
* does reasonable things.
if (!bufmgr_gem->has_llc)
return drm_intel_gem_bo_map_gtt(bo);
pthread_mutex_lock(&bufmgr_gem->lock);
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
pthread_mutex_unlock(&bufmgr_gem->lock);
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->is_userptr)
bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count <= 0) {
DBG("attempted to unmap an unmapped bo\n");
pthread_mutex_unlock(&bufmgr_gem->lock);
/* Preserve the old behaviour of just treating this as a
* no-op rather than reporting the error.
if (bo_gem->mapped_cpu_write) {
struct drm_i915_gem_sw_finish sw_finish;
/* Cause a flush to happen if the buffer's pinned for
* scanout, so the results show up in a timely manner.
* Unlike GTT set domains, this only does work if the
* buffer should be scanout-related.
VG_CLEAR(sw_finish);
sw_finish.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SW_FINISH,
ret = ret == -1 ? -errno : 0;
bo_gem->mapped_cpu_write = false;
/* We need to unmap after every invocation, as we cannot track
* an open vma for every bo; that would exhaust the system
* limits and cause later failures.
if (--bo_gem->map_count == 0) {
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
return drm_intel_gem_bo_unmap(bo);
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_pwrite pwrite;
if (bo_gem->is_userptr)
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PWRITE,
DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
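/* Caller-side sketch (illustrative data): upload without mapping by
 * letting the kernel pwrite into the object.
 *
 *   static const uint32_t data[2] = { 0, 0 };
 *   drm_intel_bo_subdata(bo, 0, sizeof(data), data);
 */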
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
VG_CLEAR(get_pipe_from_crtc_id);
get_pipe_from_crtc_id.crtc_id = crtc_id;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
&get_pipe_from_crtc_id);
/* We return -1 here to signal that we don't
* know which pipe is associated with this crtc.
* This lets the caller know that this information
* isn't available; using the wrong pipe for
* vblank waiting can cause the chipset to lock up
return get_pipe_from_crtc_id.pipe;
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_pread pread;
if (bo_gem->is_userptr)
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
pread.data_ptr = (uint64_t) (uintptr_t) data;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PREAD,
DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
/** Waits for all GPU rendering with the object to have completed. */
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
drm_intel_gem_bo_start_gtt_access(bo, 1);
* Waits on a BO for the given amount of time.
* @bo: buffer object to wait for
* @timeout_ns: amount of time to wait in nanoseconds.
* If value is less than 0, an infinite wait will occur.
* Returns 0 if the wait was successful, i.e. the last batch referencing the
* object has completed within the allotted time. Otherwise some negative return
* value describes the error. Of particular interest is -ETIME when the wait has
* failed to yield the desired result.
* Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
* the operation to give up after a certain amount of time. Another subtle
* difference is the internal locking semantics are different (this variant does
* not hold the lock for the duration of the wait). This makes the wait subject
* to a larger userspace race window.
* The implementation shall wait until the object is no longer actively
* referenced within a batch buffer at the time of the call. The wait will
* not guarantee that the buffer is re-issued via another thread, or a flinked
* handle. Userspace must make sure this race does not occur if such precision
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_wait wait;
if (!bufmgr_gem->has_wait_timeout) {
DBG("%s:%d: Timed wait is not supported. Falling back to "
"infinite wait\n", __FILE__, __LINE__);
drm_intel_gem_bo_wait_rendering(bo);
return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
wait.bo_handle = bo_gem->gem_handle;
wait.timeout_ns = timeout_ns;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
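/* Caller-side sketch (illustrative): give the GPU up to one second
 * (1e9 ns) to finish with the BO before touching it.
 *
 *   if (drm_intel_gem_bo_wait(bo, 1000000000) == -ETIME) {
 *           ... // still busy, back off
 *   }
 */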
* Sets the object to the GTT read and possibly write domain, used by the X
* 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
set_domain.read_domains, set_domain.write_domain,
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
free(bufmgr_gem->exec2_objects);
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
free(bufmgr_gem->aub_filename);
pthread_mutex_destroy(&bufmgr_gem->lock);
/* Free any cached buffer objects we were going to reuse */
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
drm_intel_bo_gem *bo_gem;
while (!DRMLISTEMPTY(&bucket->head)) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
DRMLISTDEL(&bo_gem->head);
drm_intel_gem_bo_free(&bo_gem->bo);
* Adds the target buffer to the validation list and adds the relocation
* to the reloc_buffer's relocation list.
* The relocation entry at the given offset must already contain the
* precomputed relocation value, because the kernel will optimize out
* the relocation entry write when the buffer hasn't moved from the
* last known offset in target_bo.
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain,
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
bool fenced_command;
if (bo_gem->has_error)
if (target_bo_gem->has_error) {
bo_gem->has_error = true;
/* We never use HW fences for rendering on 965+ */
if (bufmgr_gem->gen >= 4)
fenced_command = need_fence;
if (target_bo_gem->tiling_mode == I915_TILING_NONE)
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
/* Check overflow */
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
assert(offset <= bo->size - 4);
assert((write_domain & (write_domain - 1)) == 0);
/* Make sure that we're not adding a reloc to something whose size has
* already been accounted for.
assert(!bo_gem->used_as_reloc_target);
if (target_bo_gem != bo_gem) {
target_bo_gem->used_as_reloc_target = true;
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
/* An object needing a fence is a tiled buffer, so it won't have
* relocs to other buffers.
target_bo_gem->reloc_tree_fences = 1;
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
bo_gem->relocs[bo_gem->reloc_count].target_handle =
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
drm_intel_gem_bo_reference(target_bo);
bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
DRM_INTEL_RELOC_FENCE;
bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
bo_gem->reloc_count++;
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain,
!bufmgr_gem->fenced_relocs);
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain, true);
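/* Caller-side sketch (illustrative; `batch` and `n` are hypothetical
 * caller variables): write the presumed address into the batch, then
 * record the reloc so the kernel can patch it if the target moves.
 *
 *   batch[n] = target_bo->offset64 + target_offset;
 *   drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *                           target_bo, target_offset,
 *                           I915_GEM_DOMAIN_RENDER, 0);
 */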
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return bo_gem->reloc_count;
* Removes existing relocation entries in the BO after "start".
* This allows a user to avoid a two-step process for state setup with
* counting up all the buffer objects and doing a
* drm_intel_bufmgr_check_aperture_space() before emitting any of the
* relocations for the state setup. Instead, save the state of the
* batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
* state, and then check if it still fits in the aperture.
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
assert(bo_gem->reloc_count >= start);
/* Unreference the cleared target buffers */
pthread_mutex_lock(&bufmgr_gem->lock);
for (i = start; i < bo_gem->reloc_count; i++) {
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
if (&target_bo_gem->bo != bo) {
bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
bo_gem->reloc_count = start;
pthread_mutex_unlock(&bufmgr_gem->lock);
* Walk the tree of relocations rooted at BO and accumulate the list of
* validations to be performed and update the relocation buffers with
* index values into the validation list.
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
if (bo_gem->relocs == NULL)
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
if (target_bo == bo)
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc(target_bo);
/* Add the target to the validate list */
drm_intel_add_validate_buffer(target_bo);
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
if (bo_gem->relocs == NULL)
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
if (target_bo == bo)
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc2(target_bo);
need_fence = (bo_gem->reloc_target_info[i].flags &
DRM_INTEL_RELOC_FENCE);
/* Add the target to the validate list */
drm_intel_add_validate_buffer2(target_bo, need_fence);
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec_objects[i].
bo->offset64 = bufmgr_gem->exec_objects[i].offset;
bo->offset = bufmgr_gem->exec_objects[i].offset;
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
bo->offset = bufmgr_gem->exec2_objects[i].offset;
aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
fwrite(&data, 1, 4, bufmgr_gem->aub_file);
aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
fwrite(data, 1, size, bufmgr_gem->aub_file);
aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
data = malloc(bo->size);
drm_intel_bo_get_subdata(bo, offset, size, data);
/* Easy mode: write out bo with no relocations */
if (!bo_gem->reloc_count) {
aub_out_data(bufmgr_gem, data, size);
/* Otherwise, handle the relocations while writing. */
for (i = 0; i < size / 4; i++) {
for (r = 0; r < bo_gem->reloc_count; r++) {
struct drm_i915_gem_relocation_entry *reloc;
drm_intel_reloc_target *info;
reloc = &bo_gem->relocs[r];
info = &bo_gem->reloc_target_info[r];
if (reloc->offset == offset + i * 4) {
drm_intel_bo_gem *target_gem;
target_gem = (drm_intel_bo_gem *)info->bo;
val += target_gem->aub_offset;
aub_out(bufmgr_gem, val);
if (r == bo_gem->reloc_count) {
/* no relocation, just the data */
aub_out(bufmgr_gem, data[i]);
aub_bo_get_address(drm_intel_bo *bo)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Give the object a graphics address in the AUB file. We
* don't just use the GEM object address because we do AUB
* dumping before execution -- we want to successfully log
* when the hardware might hang, and we might even want to aub
* capture for a driver trying to execute on a different
* generation of hardware by disabling the actual kernel exec
bo_gem->aub_offset = bufmgr_gem->aub_offset;
bufmgr_gem->aub_offset += bo->size;
/* XXX: Handle aperture overflow. */
assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
uint32_t offset, uint32_t size)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
CMD_AUB_TRACE_HEADER_BLOCK |
((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, subtype);
aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
aub_out(bufmgr_gem, size);
if (bufmgr_gem->gen >= 8)
aub_out(bufmgr_gem, 0);
aub_write_bo_data(bo, offset, size);
* Break up large objects into multiple writes. Otherwise a 128 KiB VBO
* would overflow the 16-bit size field in the packet header and
* everything goes badly after that.
aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
uint32_t offset, uint32_t size)
uint32_t block_size;
uint32_t sub_offset;
for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
block_size = size - sub_offset;
if (block_size > 8 * 4096)
block_size = 8 * 4096;
aub_write_trace_block(bo, type, subtype, offset + sub_offset,
aub_write_bo(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
uint32_t offset = 0;
aub_bo_get_address(bo);
/* Write out each annotated section separately. */
for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
drm_intel_aub_annotation *annotation =
&bo_gem->aub_annotations[i];
uint32_t ending_offset = annotation->ending_offset;
if (ending_offset > bo->size)
ending_offset = bo->size;
if (ending_offset > offset) {
aub_write_large_trace_block(bo, annotation->type,
annotation->subtype,
ending_offset - offset);
offset = ending_offset;
/* Write out any remaining unannotated data */
if (offset < bo->size) {
aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
offset, bo->size - offset);
* Make a ring buffer on the fly and dump it
aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
uint32_t batch_buffer, int ring_flag)
uint32_t ringbuffer[4096];
int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
if (ring_flag == I915_EXEC_BSD)
ring = AUB_TRACE_TYPE_RING_PRB1;
else if (ring_flag == I915_EXEC_BLT)
ring = AUB_TRACE_TYPE_RING_PRB2;
/* Make a ring buffer to execute our batchbuffer. */
memset(ringbuffer, 0, sizeof(ringbuffer));
if (bufmgr_gem->gen >= 8) {
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
ringbuffer[ring_count++] = batch_buffer;
ringbuffer[ring_count++] = 0;
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
ringbuffer[ring_count++] = batch_buffer;
/* Write out the ring. This appears to trigger execution of
* the ring in the simulator.
CMD_AUB_TRACE_HEADER_BLOCK |
((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
aub_out(bufmgr_gem, 0); /* general/surface subtype */
aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
aub_out(bufmgr_gem, ring_count * 4);
if (bufmgr_gem->gen >= 8)
aub_out(bufmgr_gem, 0);
/* FIXME: Need some flush operations here? */
aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
/* Update offset pointer */
bufmgr_gem->aub_offset += 4096;
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
case AUB_DUMP_BMP_FORMAT_8BIT:
case AUB_DUMP_BMP_FORMAT_ARGB_4444:
case AUB_DUMP_BMP_FORMAT_ARGB_0888:
case AUB_DUMP_BMP_FORMAT_ARGB_8888:
printf("Unknown AUB dump format %d\n", format);
if (!bufmgr_gem->aub_file)
aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
aub_out(bufmgr_gem, (y1 << 16) | x1);
aub_out(bufmgr_gem, (height << 16) | width);
aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
aub_exec(drm_intel_bo *bo, int ring_flag, int used)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
bool batch_buffer_needs_annotations;
if (!bufmgr_gem->aub_file)
2284 /* If the batch buffer is not annotated, annotate it the best we can. */
2287 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2288 if (batch_buffer_needs_annotations) {
2289 drm_intel_aub_annotation annotations[2] = {
2290 { AUB_TRACE_TYPE_BATCH, 0, used },
2291 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2293 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2296 /* Write out all buffers to AUB memory */
2297 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2298 aub_write_bo(bufmgr_gem->exec_bos[i]);
2301 /* Remove any annotations we added */
2302 if (batch_buffer_needs_annotations)
2303 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2305 /* Dump ring buffer */
2306 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2308 fflush(bufmgr_gem->aub_file);
2311 * One frame has been dumped, so reset the aub_offset for the next frame.
2313 * FIXME: Can we do this?
2315 bufmgr_gem->aub_offset = 0x10000;
2319 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2320 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2322 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2323 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2324 struct drm_i915_gem_execbuffer execbuf;
2327 if (bo_gem->has_error)
2330 pthread_mutex_lock(&bufmgr_gem->lock);
2331 /* Update indices and set up the validate list. */
2332 drm_intel_gem_bo_process_reloc(bo);
2334 /* Add the batch buffer to the validation list. There are no
2335 * relocations pointing to it.
2337 drm_intel_add_validate_buffer(bo);
2340 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2341 execbuf.buffer_count = bufmgr_gem->exec_count;
2342 execbuf.batch_start_offset = 0;
2343 execbuf.batch_len = used;
2344 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2345 execbuf.num_cliprects = num_cliprects;
2349 ret = drmIoctl(bufmgr_gem->fd,
2350 DRM_IOCTL_I915_GEM_EXECBUFFER,
2354 if (errno == ENOSPC) {
2355 DBG("Execbuffer fails to pin. "
2356 "Estimate: %u. Actual: %u. Available: %u\n",
2357 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2360 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2363 (unsigned int)bufmgr_gem->gtt_size);
2366 drm_intel_update_buffer_offsets(bufmgr_gem);
2368 if (bufmgr_gem->bufmgr.debug)
2369 drm_intel_gem_dump_validation_list(bufmgr_gem);
2371 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2372 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2373 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2375 bo_gem->idle = false;
2377 /* Disconnect the buffer from the validate list */
2378 bo_gem->validate_index = -1;
2379 bufmgr_gem->exec_bos[i] = NULL;
2381 bufmgr_gem->exec_count = 0;
2382 pthread_mutex_unlock(&bufmgr_gem->lock);
2388 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2389 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2392 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2393 struct drm_i915_gem_execbuffer2 execbuf;
2397 switch (flags & 0x7) {
2401 if (!bufmgr_gem->has_blt)
2405 if (!bufmgr_gem->has_bsd)
2408 case I915_EXEC_VEBOX:
2409 if (!bufmgr_gem->has_vebox)
2412 case I915_EXEC_RENDER:
2413 case I915_EXEC_DEFAULT:
2417 pthread_mutex_lock(&bufmgr_gem->lock);
2418 /* Update indices and set up the validate list. */
2419 drm_intel_gem_bo_process_reloc2(bo);
2421 /* Add the batch buffer to the validation list. There are no relocations pointing to it. */
2424 drm_intel_add_validate_buffer2(bo, 0);
2427 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2428 execbuf.buffer_count = bufmgr_gem->exec_count;
2429 execbuf.batch_start_offset = 0;
2430 execbuf.batch_len = used;
2431 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2432 execbuf.num_cliprects = num_cliprects;
2435 execbuf.flags = flags;
2437 i915_execbuffer2_set_context_id(execbuf, 0);
2439 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2442 aub_exec(bo, flags, used);
2444 if (bufmgr_gem->no_exec)
2445 goto skip_execution;
2447 ret = drmIoctl(bufmgr_gem->fd,
2448 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2452 if (ret == -ENOSPC) {
2453 DBG("Execbuffer fails to pin. "
2454 "Estimate: %u. Actual: %u. Available: %u\n",
2455 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2456 bufmgr_gem->exec_count),
2457 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2458 bufmgr_gem->exec_count),
2459 (unsigned int) bufmgr_gem->gtt_size);
2462 drm_intel_update_buffer_offsets2(bufmgr_gem);
2465 if (bufmgr_gem->bufmgr.debug)
2466 drm_intel_gem_dump_validation_list(bufmgr_gem);
2468 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2469 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2470 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2472 bo_gem->idle = false;
2474 /* Disconnect the buffer from the validate list */
2475 bo_gem->validate_index = -1;
2476 bufmgr_gem->exec_bos[i] = NULL;
2478 bufmgr_gem->exec_count = 0;
2479 pthread_mutex_unlock(&bufmgr_gem->lock);
2485 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2486 drm_clip_rect_t *cliprects, int num_cliprects,
2489 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2494 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2495 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2498 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2503 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2504 int used, unsigned int flags)
2506 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
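/*
 * Editor's sketch (illustrative only, hence #if 0): how a caller picks a
 * ring through the public exec entry points that wrap do_exec2() above.
 * "batch" and "used" are assumed to come from the caller's own
 * batchbuffer code.
 */
#if 0
static void
example_exec_on_blt(drm_intel_bo *batch, int used)
{
	/* Plain exec targets the default (render) ring... */
	drm_intel_bo_exec(batch, used, NULL, 0, 0);

	/* ...while mrb_exec lets the caller request another ring, subject
	 * to the has_blt/has_bsd/has_vebox checks in do_exec2(). */
	drm_intel_bo_mrb_exec(batch, used, NULL, 0, 0, I915_EXEC_BLT);
}
#endif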
2510 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2512 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2513 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2514 struct drm_i915_gem_pin pin;
2518 pin.handle = bo_gem->gem_handle;
2519 pin.alignment = alignment;
2521 ret = drmIoctl(bufmgr_gem->fd,
2522 DRM_IOCTL_I915_GEM_PIN,
2527 bo->offset64 = pin.offset;
2528 bo->offset = pin.offset;
2533 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2535 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2536 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2537 struct drm_i915_gem_unpin unpin;
2541 unpin.handle = bo_gem->gem_handle;
2543 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2551 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2552 uint32_t tiling_mode,
2555 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2556 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2557 struct drm_i915_gem_set_tiling set_tiling;
2560 if (bo_gem->global_name == 0 &&
2561 tiling_mode == bo_gem->tiling_mode &&
2562 stride == bo_gem->stride)
2565 memset(&set_tiling, 0, sizeof(set_tiling));
2567 /* set_tiling is slightly broken and overwrites the
2568 * input on the error path, so we have to open code
2569 * drmIoctl. */
2571 set_tiling.handle = bo_gem->gem_handle;
2572 set_tiling.tiling_mode = tiling_mode;
2573 set_tiling.stride = stride;
2574 do {
2575 ret = ioctl(bufmgr_gem->fd,
2576 DRM_IOCTL_I915_GEM_SET_TILING,
2577 &set_tiling);
2578 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2582 bo_gem->tiling_mode = set_tiling.tiling_mode;
2583 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2584 bo_gem->stride = set_tiling.stride;
2589 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2592 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2593 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2596 /* Tiling with userptr surfaces is not supported
2597 * on all hardware, so refuse it for the time being.
2599 if (bo_gem->is_userptr)
2602 /* Linear buffers have no stride. By ensuring that we only ever use
2603 * stride 0 with linear buffers, we simplify our code.
2605 if (*tiling_mode == I915_TILING_NONE)
2608 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2610 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2612 *tiling_mode = bo_gem->tiling_mode;
2617 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2618 uint32_t * swizzle_mode)
2620 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2622 *tiling_mode = bo_gem->tiling_mode;
2623 *swizzle_mode = bo_gem->swizzle_mode;
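/*
 * Editor's sketch (illustrative only): driving the two tiling entry
 * points above through their public wrappers. The 512-byte stride is a
 * made-up value for the example.
 */
#if 0
static void
example_tile_bo(drm_intel_bo *bo)
{
	uint32_t tiling = I915_TILING_X;
	uint32_t swizzle;

	/* Request X tiling with a 512-byte stride; on return "tiling"
	 * holds whatever the kernel actually granted. */
	drm_intel_bo_set_tiling(bo, &tiling, 512);

	/* Read back the mode plus the swizzle the kernel chose. */
	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
}
#endif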
2627 drm_public drm_intel_bo *
2628 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2630 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2633 drm_intel_bo_gem *bo_gem;
2634 struct drm_i915_gem_get_tiling get_tiling;
2635 drmMMListHead *list;
2637 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2640 * See if the kernel has already returned this buffer to us. Just as
2641 * for named buffers, we must not create two bo's pointing at the same
2644 pthread_mutex_lock(&bufmgr_gem->lock);
2645 for (list = bufmgr_gem->named.next;
2646 list != &bufmgr_gem->named;
2647 list = list->next) {
2648 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
2649 if (bo_gem->gem_handle == handle) {
2650 drm_intel_gem_bo_reference(&bo_gem->bo);
2651 pthread_mutex_unlock(&bufmgr_gem->lock);
2657 fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n", ret, errno);
2658 pthread_mutex_unlock(&bufmgr_gem->lock);
2662 bo_gem = calloc(1, sizeof(*bo_gem));
2664 pthread_mutex_unlock(&bufmgr_gem->lock);
2667 /* Determine size of bo. The fd-to-handle ioctl really should
2668 * return the size, but it doesn't. If we have kernel 3.12 or
2669 * later, we can lseek on the prime fd to get the size. Older
2670 * kernels will just fail, in which case we fall back to the
2671 * provided (estimated or guessed) size. */
2672 ret = lseek(prime_fd, 0, SEEK_END);
2674 bo_gem->bo.size = ret;
2676 bo_gem->bo.size = size;
2678 bo_gem->bo.handle = handle;
2679 bo_gem->bo.bufmgr = bufmgr;
2681 bo_gem->gem_handle = handle;
2683 atomic_set(&bo_gem->refcount, 1);
2685 bo_gem->name = "prime";
2686 bo_gem->validate_index = -1;
2687 bo_gem->reloc_tree_fences = 0;
2688 bo_gem->used_as_reloc_target = false;
2689 bo_gem->has_error = false;
2690 bo_gem->reusable = false;
2692 DRMINITLISTHEAD(&bo_gem->vma_list);
2693 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2694 pthread_mutex_unlock(&bufmgr_gem->lock);
2696 VG_CLEAR(get_tiling);
2697 get_tiling.handle = bo_gem->gem_handle;
2698 ret = drmIoctl(bufmgr_gem->fd,
2699 DRM_IOCTL_I915_GEM_GET_TILING,
2702 drm_intel_gem_bo_unreference(&bo_gem->bo);
2705 bo_gem->tiling_mode = get_tiling.tiling_mode;
2706 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2707 /* XXX stride is unknown */
2708 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2714 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2716 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2717 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2719 pthread_mutex_lock(&bufmgr_gem->lock);
2720 if (DRMLISTEMPTY(&bo_gem->name_list))
2721 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2722 pthread_mutex_unlock(&bufmgr_gem->lock);
2724 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2725 DRM_CLOEXEC, prime_fd) != 0)
2728 bo_gem->reusable = false;
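/*
 * Editor's sketch (illustrative only): a PRIME export/import round trip
 * through the two functions above. Passing size 0 to the import relies
 * on the lseek() size probe described earlier; the exported fd remains
 * owned by the caller.
 */
#if 0
static drm_intel_bo *
example_prime_round_trip(drm_intel_bufmgr *bufmgr, drm_intel_bo *bo)
{
	int fd;

	if (drm_intel_bo_gem_export_to_prime(bo, &fd) != 0)
		return NULL;

	/* Re-importing the same fd returns the existing bo, referenced,
	 * thanks to the "named" list check above. */
	return drm_intel_bo_gem_create_from_prime(bufmgr, fd, 0);
}
#endif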
2734 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2736 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2737 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2740 if (!bo_gem->global_name) {
2741 struct drm_gem_flink flink;
2744 flink.handle = bo_gem->gem_handle;
2746 pthread_mutex_lock(&bufmgr_gem->lock);
2748 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2750 pthread_mutex_unlock(&bufmgr_gem->lock);
2754 bo_gem->global_name = flink.name;
2755 bo_gem->reusable = false;
2757 if (DRMLISTEMPTY(&bo_gem->name_list))
2758 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2759 pthread_mutex_unlock(&bufmgr_gem->lock);
2762 *name = bo_gem->global_name;
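/*
 * Editor's sketch (illustrative only): sharing a bo by flink name. The
 * receiving process would import the name via the matching
 * drm_intel_bo_gem_create_from_name() path elsewhere in this file.
 */
#if 0
static uint32_t
example_share_by_name(drm_intel_bo *bo)
{
	uint32_t name = 0;

	drm_intel_bo_flink(bo, &name);	/* also makes the bo non-reusable */
	return name;
}
#endif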
2767 * Enables unlimited caching of buffer objects for reuse.
2769 * This is potentially very memory expensive, as the cache at each bucket
2770 * size is only bounded by how many buffers of that size we've managed to have
2771 * in flight at once.
2774 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2776 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2778 bufmgr_gem->bo_reuse = true;
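/*
 * Editor's sketch (illustrative only): reuse is opt-in, so a typical
 * client flips it on right after creating the bufmgr. The 16KB batch
 * size is just a plausible example value.
 */
#if 0
static drm_intel_bufmgr *
example_bufmgr_with_reuse(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

	if (bufmgr)
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	return bufmgr;
}
#endif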
2782 * Enable use of fenced reloc type.
2784 * New code should enable this to avoid unnecessary fence register
2785 * allocation. If this option is not enabled, all relocs will have a
2786 * fence register allocated.
2789 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2791 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2793 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2794 bufmgr_gem->fenced_relocs = true;
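/*
 * Editor's sketch (illustrative only): with fenced relocs enabled, only
 * relocations emitted through the _fence entry point request a fence
 * register; ordinary relocs no longer tie one up. The domain values are
 * the standard i915 ones used elsewhere in this file.
 */
#if 0
static void
example_fenced_reloc(drm_intel_bo *batch, uint32_t offset,
		     drm_intel_bo *tiled_target)
{
	drm_intel_bo_emit_reloc_fence(batch, offset, tiled_target, 0,
				      I915_GEM_DOMAIN_RENDER,
				      I915_GEM_DOMAIN_RENDER);
}
#endif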
2798 * Return the additional aperture space required by the tree of buffer objects rooted at bo.
2802 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2804 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2808 if (bo == NULL || bo_gem->included_in_check_aperture)
2812 bo_gem->included_in_check_aperture = true;
2814 for (i = 0; i < bo_gem->reloc_count; i++)
2816 drm_intel_gem_bo_get_aperture_space(bo_gem->
2817 reloc_target_info[i].bo);
2823 * Count the number of buffers in this list that need a fence reg
2825 * If the count is greater than the number of available regs, we'll have
2826 * to ask the caller to resubmit a batch with fewer tiled buffers.
2828 * This function over-counts if the same buffer is used multiple times.
2831 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2834 unsigned int total = 0;
2836 for (i = 0; i < count; i++) {
2837 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2842 total += bo_gem->reloc_tree_fences;
2848 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2849 * for the next drm_intel_bufmgr_check_aperture_space() call.
2852 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2854 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2857 if (bo == NULL || !bo_gem->included_in_check_aperture)
2860 bo_gem->included_in_check_aperture = false;
2862 for (i = 0; i < bo_gem->reloc_count; i++)
2863 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2864 reloc_target_info[i].bo);
2868 * Return a conservative estimate for the amount of aperture required
2869 * for a collection of buffers. This may double-count some buffers.
2872 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2875 unsigned int total = 0;
2877 for (i = 0; i < count; i++) {
2878 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2880 total += bo_gem->reloc_tree_size;
2886 * Return the amount of aperture needed for a collection of buffers.
2887 * This avoids double counting any buffers, at the cost of looking
2888 * at every buffer in the set.
2891 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2894 unsigned int total = 0;
2896 for (i = 0; i < count; i++) {
2897 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2898 /* For the first buffer object in the array, we get an
2899 * accurate count back for its reloc_tree size (since nothing
2900 * had been flagged as being counted yet). We can save that
2901 * value out as a more conservative reloc_tree_size that
2902 * avoids double-counting target buffers. Since the first
2903 * buffer happens to usually be the batch buffer in our
2904 * callers, this can pull us back from doing the tree
2905 * walk on every new batch emit.
2908 drm_intel_bo_gem *bo_gem =
2909 (drm_intel_bo_gem *) bo_array[i];
2910 bo_gem->reloc_tree_size = total;
2914 for (i = 0; i < count; i++)
2915 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2920 * Return -1 if the batchbuffer should be flushed before attempting to
2921 * emit rendering referencing the buffers pointed to by bo_array.
2923 * This is required because if we try to emit a batchbuffer with relocations
2924 * to a tree of buffers that won't simultaneously fit in the aperture,
2925 * the rendering will return an error at a point where the software is not
2926 * prepared to recover from it.
2928 * However, we also want to emit the batchbuffer significantly before we reach
2929 * the limit, as a series of batchbuffers each of which references buffers
2930 * covering almost all of the aperture means that at each emit we end up
2931 * waiting to evict a buffer from the last rendering, and we get synchronous
2932 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2933 * get better parallelism.
2936 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2938 drm_intel_bufmgr_gem *bufmgr_gem =
2939 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2940 unsigned int total = 0;
2941 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2944 /* Check for fence reg constraints if necessary */
2945 if (bufmgr_gem->available_fences) {
2946 total_fences = drm_intel_gem_total_fences(bo_array, count);
2947 if (total_fences > bufmgr_gem->available_fences)
2951 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2953 if (total > threshold)
2954 total = drm_intel_gem_compute_batch_space(bo_array, count);
2956 if (total > threshold) {
2957 DBG("check_space: overflowed available aperture, "
2958 "%dkb vs %dkb\n",
2959 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2962 DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
2963 (int)bufmgr_gem->gtt_size / 1024);
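/*
 * Editor's sketch (illustrative only): the flush-early pattern the check
 * above enables. "flush_batch" and "bos" stand in for the caller's own
 * batch management and are hypothetical names.
 */
#if 0
static void
example_require_space(drm_intel_bo **bos, int count)
{
	/* If the bos (plus their reloc trees) won't fit in 3/4 of the
	 * aperture, submit what we have and start a fresh batch. */
	if (drm_intel_bufmgr_check_aperture_space(bos, count) != 0)
		flush_batch();
}
#endif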
2969 * Disable buffer reuse for objects which are shared with the kernel
2970 * as scanout buffers
2973 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2975 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2977 bo_gem->reusable = false;
2982 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2984 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2986 return bo_gem->reusable;
2990 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2992 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2995 for (i = 0; i < bo_gem->reloc_count; i++) {
2996 if (bo_gem->reloc_target_info[i].bo == target_bo)
2998 if (bo == bo_gem->reloc_target_info[i].bo)
3000 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
3008 /** Return true if target_bo is referenced by bo's relocation tree. */
3010 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3012 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3014 if (bo == NULL || target_bo == NULL)
3016 if (target_bo_gem->used_as_reloc_target)
3017 return _drm_intel_gem_bo_references(bo, target_bo);
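/*
 * Editor's sketch (illustrative only): a caller typically uses this to
 * decide whether a pending batch must be submitted before touching a
 * target buffer, since the walk above covers the whole reloc tree.
 * "flush_batch" is a hypothetical caller-side helper.
 */
#if 0
static void
example_sync_before_read(drm_intel_bo *batch, drm_intel_bo *target)
{
	if (drm_intel_bo_references(batch, target))
		flush_batch();
}
#endif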
3022 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3024 unsigned int i = bufmgr_gem->num_buckets;
3026 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3028 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3029 bufmgr_gem->cache_bucket[i].size = size;
3030 bufmgr_gem->num_buckets++;
3034 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3036 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3038 /* OK, so power of two buckets was too wasteful of memory.
3039 * Give 3 other sizes between each power of two, to hopefully
3040 * cover things accurately enough. (The alternative is
3041 * probably to just go for exact matching of sizes, and assume
3042 * that for things like composited window resize the tiled
3043 * width/height alignment and rounding of sizes to pages will
3044 * get us useful cache hit rates anyway)
3046 add_bucket(bufmgr_gem, 4096);
3047 add_bucket(bufmgr_gem, 4096 * 2);
3048 add_bucket(bufmgr_gem, 4096 * 3);
3050 /* Initialize the linked lists for BO reuse cache. */
3051 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3052 add_bucket(bufmgr_gem, size);
3054 add_bucket(bufmgr_gem, size + size * 1 / 4);
3055 add_bucket(bufmgr_gem, size + size * 2 / 4);
3056 add_bucket(bufmgr_gem, size + size * 3 / 4);
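/*
 * Editor's note: with the code above, the bucket sizes come out as
 * 4KB, 8KB, 12KB, then 16/20/24/28KB, 32/40/48/56KB, 64/80/96/112KB,
 * and so on -- each power of two plus three evenly spaced intermediates --
 * until the loop passes the 64MB cache_max_size.
 */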
3061 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3063 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3065 bufmgr_gem->vma_max = limit;
3067 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3071 * Get the PCI ID for the device. This can be overridden by setting the
3072 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3075 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3077 char *devid_override;
3080 drm_i915_getparam_t gp;
3082 if (geteuid() == getuid()) {
3083 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3084 if (devid_override) {
3085 bufmgr_gem->no_exec = true;
3086 return strtol(devid_override, NULL, 0); /* accepts hex or decimal */
3092 gp.param = I915_PARAM_CHIPSET_ID;
3094 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3096 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3097 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3103 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3105 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3107 return bufmgr_gem->pci_device;
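/*
 * Editor's note: the override is intended for producing traces for a
 * device other than the one installed, e.g. (shell, illustrative):
 *
 *	INTEL_DEVID_OVERRIDE=0x0412 ./my_gl_app
 *
 * Execution is disabled (no_exec above) because batches built for the
 * faked device cannot safely run on the real one.
 */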
3111 * Sets the AUB filename.
3113 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3114 * for it to have any effect.
3117 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3118 const char *filename)
3120 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3122 free(bufmgr_gem->aub_filename);
3124 bufmgr_gem->aub_filename = strdup(filename);
3128 * Sets up AUB dumping.
3130 * This is a trace file format that can be used with the simulator.
3131 * Packets are emitted in a format somewhat like GPU command packets.
3132 * You can set up a GTT and upload your objects into the referenced
3133 * space, then send off batchbuffers and get BMPs out the other end.
3136 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3138 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3139 int entry = 0x200003;
3141 int gtt_size = 0x10000;
3142 const char *filename;
3145 if (bufmgr_gem->aub_file) {
3146 fclose(bufmgr_gem->aub_file);
3147 bufmgr_gem->aub_file = NULL;
3152 if (geteuid() != getuid())
3155 if (bufmgr_gem->aub_filename)
3156 filename = bufmgr_gem->aub_filename;
3158 filename = "intel.aub";
3159 bufmgr_gem->aub_file = fopen(filename, "w+");
3160 if (!bufmgr_gem->aub_file)
3163 /* Start allocating objects from just after the GTT. */
3164 bufmgr_gem->aub_offset = gtt_size;
3166 /* Start with a (required) version packet. */
3167 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
3169 (4 << AUB_HEADER_MAJOR_SHIFT) |
3170 (0 << AUB_HEADER_MINOR_SHIFT));
3171 for (i = 0; i < 8; i++) {
3172 aub_out(bufmgr_gem, 0); /* app name */
3174 aub_out(bufmgr_gem, 0); /* timestamp */
3175 aub_out(bufmgr_gem, 0); /* timestamp */
3176 aub_out(bufmgr_gem, 0); /* comment len */
3178 /* Set up the GTT. The max we can handle is 256M */
3179 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
3180 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
3181 aub_out(bufmgr_gem, 0); /* subtype */
3182 aub_out(bufmgr_gem, 0); /* offset */
3183 aub_out(bufmgr_gem, gtt_size); /* size */
3184 if (bufmgr_gem->gen >= 8)
3185 aub_out(bufmgr_gem, 0);
3186 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
3187 aub_out(bufmgr_gem, entry);
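/*
 * Editor's sketch (illustrative only): enabling AUB capture. Per the
 * comment above, the filename call must precede set_aub_dump().
 */
#if 0
static void
example_enable_aub(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "trace.aub");
	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);

	/* ... render as usual; every exec is mirrored to the file ... */

	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);	/* closes the file */
}
#endif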
3191 drm_public drm_intel_context *
3192 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3194 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3195 struct drm_i915_gem_context_create create;
3196 drm_intel_context *context = NULL;
3199 context = calloc(1, sizeof(*context));
3204 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3206 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3212 context->ctx_id = create.ctx_id;
3213 context->bufmgr = bufmgr;
3219 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3221 drm_intel_bufmgr_gem *bufmgr_gem;
3222 struct drm_i915_gem_context_destroy destroy;
3230 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3231 destroy.ctx_id = ctx->ctx_id;
3232 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3235 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
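/*
 * Editor's sketch (illustrative only): hardware-context lifecycle around
 * a batch submission, using the create/exec/destroy entry points above.
 */
#if 0
static void
example_ctx_exec(drm_intel_bufmgr *bufmgr, drm_intel_bo *batch, int used)
{
	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);

	if (ctx) {
		drm_intel_gem_bo_context_exec(batch, ctx, used,
					      I915_EXEC_RENDER);
		drm_intel_gem_context_destroy(ctx);
	}
}
#endif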
3242 drm_intel_get_reset_stats(drm_intel_context *ctx,
3243 uint32_t *reset_count,
3247 drm_intel_bufmgr_gem *bufmgr_gem;
3248 struct drm_i915_reset_stats stats;
3254 memset(&stats, 0, sizeof(stats));
3256 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3257 stats.ctx_id = ctx->ctx_id;
3258 ret = drmIoctl(bufmgr_gem->fd,
3259 DRM_IOCTL_I915_GET_RESET_STATS,
3262 if (reset_count != NULL)
3263 *reset_count = stats.reset_count;
3266 *active = stats.batch_active;
3268 if (pending != NULL)
3269 *pending = stats.batch_pending;
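/*
 * Editor's sketch (illustrative only): a robustness-style query of
 * whether this context's batches were caught up in a GPU reset.
 */
#if 0
static bool
example_context_was_reset(drm_intel_context *ctx)
{
	uint32_t reset_count, active, pending;

	if (drm_intel_get_reset_stats(ctx, &reset_count, &active,
				      &pending) != 0)
		return false;
	return active > 0 || pending > 0;
}
#endif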
3276 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3280 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3281 struct drm_i915_reg_read reg_read;
3285 reg_read.offset = offset;
3287 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
3289 *result = reg_read.val;
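/*
 * Editor's sketch (illustrative only): reading the render ring TIMESTAMP
 * register. The 0x2358 offset is an assumption taken from common Mesa
 * usage, not from this file.
 */
#if 0
static uint64_t
example_read_timestamp(drm_intel_bufmgr *bufmgr)
{
	uint64_t ts = 0;

	drm_intel_reg_read(bufmgr, 0x2358, &ts);
	return ts;
}
#endif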
3295 * Annotate the given bo for use in aub dumping.
3297 * \param annotations is an array of drm_intel_aub_annotation objects
3298 * describing the type of data in various sections of the bo. Each
3299 * element of the array specifies the type and subtype of a section of
3300 * the bo, and the past-the-end offset of that section. The elements
3301 * of \c annotations must be sorted so that ending_offset is increasing.
3304 * \param count is the number of elements in the \c annotations array.
3305 * If \c count is zero, then \c annotations will not be dereferenced.
3307 * Annotations are copied into a private data structure, so caller may
3308 * re-use the memory pointed to by \c annotations after the call returns.
3311 * Annotations are stored for the lifetime of the bo; to reset to the
3312 * default state (no annotations), call this function with a \c count of zero.
3316 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3317 drm_intel_aub_annotation *annotations,
3320 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3321 unsigned size = sizeof(*annotations) * count;
3322 drm_intel_aub_annotation *new_annotations =
3323 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
3324 if (new_annotations == NULL) {
3325 free(bo_gem->aub_annotations);
3326 bo_gem->aub_annotations = NULL;
3327 bo_gem->aub_annotation_count = 0;
3330 memcpy(new_annotations, annotations, size);
3331 bo_gem->aub_annotations = new_annotations;
3332 bo_gem->aub_annotation_count = count;
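/*
 * Editor's sketch (illustrative only): annotating a batch the same way
 * aub_exec() does by default above -- the first "used" bytes are batch
 * commands, the remainder untyped.
 */
#if 0
static void
example_annotate_batch(drm_intel_bo *batch, int used)
{
	drm_intel_aub_annotation notes[2] = {
		{ AUB_TRACE_TYPE_BATCH, 0, used },
		{ AUB_TRACE_TYPE_NOTYPE, 0, batch->size }
	};

	drm_intel_bufmgr_gem_set_aub_annotations(batch, notes, 2);
}
#endif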
3335 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3336 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3338 static drm_intel_bufmgr_gem *
3339 drm_intel_bufmgr_gem_find(int fd)
3341 drm_intel_bufmgr_gem *bufmgr_gem;
3343 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3344 if (bufmgr_gem->fd == fd) {
3345 atomic_inc(&bufmgr_gem->refcount);
3354 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3356 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3358 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3359 pthread_mutex_lock(&bufmgr_list_mutex);
3361 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3362 DRMLISTDEL(&bufmgr_gem->managers);
3363 drm_intel_bufmgr_gem_destroy(bufmgr);
3366 pthread_mutex_unlock(&bufmgr_list_mutex);
3371 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
3376 struct drm_i915_gem_userptr userptr;
3377 struct drm_gem_close close_bo;
3379 pgsz = sysconf(_SC_PAGESIZE);
3382 ret = posix_memalign(&ptr, pgsz, pgsz);
3384 DBG("Failed to get a page (%ld) for userptr detection!\n",
3389 memset(&userptr, 0, sizeof(userptr));
3390 userptr.user_ptr = (__u64)(unsigned long)ptr;
3391 userptr.user_size = pgsz;
3393 retry:
3394 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
3395 if (ret) {
3396 if (errno == ENODEV && userptr.flags == 0) {
3397 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
3398 goto retry;
3399 }
3404 close_bo.handle = userptr.handle;
3405 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
3408 fprintf(stderr, "Failed to release test userptr object! (%d) "
3409 "i915 kernel driver may not be sane!\n", errno);
3417 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3418 * and manage buffer objects.
3420 * \param fd File descriptor of the opened DRM device.
3422 drm_public drm_intel_bufmgr *
3423 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3425 drm_intel_bufmgr_gem *bufmgr_gem;
3426 struct drm_i915_gem_get_aperture aperture;
3427 drm_i915_getparam_t gp;
3431 pthread_mutex_lock(&bufmgr_list_mutex);
3433 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3437 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3438 if (bufmgr_gem == NULL)
3441 bufmgr_gem->fd = fd;
3442 atomic_set(&bufmgr_gem->refcount, 1);
3444 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3450 ret = drmIoctl(bufmgr_gem->fd,
3451 DRM_IOCTL_I915_GEM_GET_APERTURE,
3455 bufmgr_gem->gtt_size = aperture.aper_available_size;
3457 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3459 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3460 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3461 "May lead to reduced performance or incorrect "
3463 (int)bufmgr_gem->gtt_size / 1024);
3466 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3468 if (IS_GEN2(bufmgr_gem->pci_device))
3469 bufmgr_gem->gen = 2;
3470 else if (IS_GEN3(bufmgr_gem->pci_device))
3471 bufmgr_gem->gen = 3;
3472 else if (IS_GEN4(bufmgr_gem->pci_device))
3473 bufmgr_gem->gen = 4;
3474 else if (IS_GEN5(bufmgr_gem->pci_device))
3475 bufmgr_gem->gen = 5;
3476 else if (IS_GEN6(bufmgr_gem->pci_device))
3477 bufmgr_gem->gen = 6;
3478 else if (IS_GEN7(bufmgr_gem->pci_device))
3479 bufmgr_gem->gen = 7;
3480 else if (IS_GEN8(bufmgr_gem->pci_device))
3481 bufmgr_gem->gen = 8;
3482 else if (IS_GEN9(bufmgr_gem->pci_device))
3483 bufmgr_gem->gen = 9;
3490 if (IS_GEN3(bufmgr_gem->pci_device) &&
3491 bufmgr_gem->gtt_size > 256*1024*1024) {
3492 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3493 * be used for tiled blits. To simplify the accounting, just
3494 * subtract the unmappable part (fixed to 256MB on all known
3495 * gen3 devices) if the kernel advertises it. */
3496 bufmgr_gem->gtt_size -= 256*1024*1024;
3502 gp.param = I915_PARAM_HAS_EXECBUF2;
3503 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3507 gp.param = I915_PARAM_HAS_BSD;
3508 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3509 bufmgr_gem->has_bsd = ret == 0;
3511 gp.param = I915_PARAM_HAS_BLT;
3512 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3513 bufmgr_gem->has_blt = ret == 0;
3515 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3516 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3517 bufmgr_gem->has_relaxed_fencing = ret == 0;
3519 if (has_userptr(bufmgr_gem))
3520 bufmgr_gem->bufmgr.bo_alloc_userptr =
3521 drm_intel_gem_bo_alloc_userptr;
3523 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3524 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3525 bufmgr_gem->has_wait_timeout = ret == 0;
3527 gp.param = I915_PARAM_HAS_LLC;
3528 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3530 /* Kernel does not support the HAS_LLC query; fall back to GPU
3531 * generation detection and assume that we have LLC on GEN6/7. */
3533 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3534 IS_GEN7(bufmgr_gem->pci_device));
3536 bufmgr_gem->has_llc = *gp.value;
3538 gp.param = I915_PARAM_HAS_VEBOX;
3539 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3540 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3542 if (bufmgr_gem->gen < 4) {
3543 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3544 gp.value = &bufmgr_gem->available_fences;
3545 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3547 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3549 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3551 bufmgr_gem->available_fences = 0;
3553 /* XXX The kernel reports the total number of fences,
3554 * including any that may be pinned.
3556 * We presume that there will be at least one pinned
3557 * fence for the scanout buffer, but there may be more
3558 * than one scanout and the user may be manually
3559 * pinning buffers. Let's move to execbuffer2 and
3560 * thereby forget the insanity of using fences...
3562 bufmgr_gem->available_fences -= 2;
3563 if (bufmgr_gem->available_fences < 0)
3564 bufmgr_gem->available_fences = 0;
3568 /* Let's go with one relocation per every 2 dwords (but round down a bit
3569 * since a power of two will mean an extra page allocation for the reloc buffer).
3572 * Every 4 was too few for the blender benchmark.
3574 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3576 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3577 bufmgr_gem->bufmgr.bo_alloc_for_render =
3578 drm_intel_gem_bo_alloc_for_render;
3579 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3580 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3581 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3582 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3583 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3584 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3585 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3586 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3587 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3588 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3589 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3590 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3591 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3592 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3593 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3594 /* Use the new one if available */
3596 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3597 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3599 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3600 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3601 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3602 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3603 bufmgr_gem->bufmgr.debug = 0;
3604 bufmgr_gem->bufmgr.check_aperture_space =
3605 drm_intel_gem_check_aperture_space;
3606 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3607 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3608 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3609 drm_intel_gem_get_pipe_from_crtc_id;
3610 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3612 DRMINITLISTHEAD(&bufmgr_gem->named);
3613 init_cache_buckets(bufmgr_gem);
3615 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3616 bufmgr_gem->vma_max = -1; /* unlimited by default */
3618 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3621 pthread_mutex_unlock(&bufmgr_list_mutex);
3623 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
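/*
 * Editor's sketch (illustrative only): the full lifecycle around the
 * init function above. The 16KB batch size is just a plausible value;
 * because destroy is wired to drm_intel_bufmgr_gem_unref() above,
 * drm_intel_bufmgr_destroy() is a refcounted unref here.
 */
#if 0
static void
example_lifecycle(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
	drm_intel_bo *bo;

	if (!bufmgr)
		return;

	bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
	if (bo)
		drm_intel_bo_unreference(bo);

	drm_intel_bufmgr_destroy(bufmgr);	/* drops the refcount */
}
#endif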