1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
52 #include <sys/types.h>
57 #define ETIME ETIMEDOUT
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
76 #define memclear(s) memset(&s, 0, sizeof(s))
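/* Illustrative pattern (a sketch, not new API): every ioctl argument
 * struct in this file is zeroed with memclear() before the relevant
 * fields are filled in, e.g.:
 *
 *	struct drm_i915_gem_busy busy;
 *	memclear(busy);
 *	busy.handle = bo_gem->gem_handle;
 *	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *
 * Zeroing first keeps reserved and pad fields at 0 so the kernel does
 * not reject the request.
 */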
78 #define DBG(...) do { \
79 if (bufmgr_gem->bufmgr.debug) \
80 fprintf(stderr, __VA_ARGS__); \
83 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
84 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
86 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
88 struct drm_intel_gem_bo_bucket {
93 typedef struct _drm_intel_bufmgr_gem {
94 drm_intel_bufmgr bufmgr;
102 pthread_mutex_t lock;
104 struct drm_i915_gem_exec_object *exec_objects;
105 struct drm_i915_gem_exec_object2 *exec2_objects;
106 drm_intel_bo **exec_bos;
110 /** Array of lists of cached gem objects of power-of-two sizes */
111 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
115 drmMMListHead managers;
118 drmMMListHead vma_cache;
119 int vma_count, vma_open, vma_max;
122 int available_fences;
125 unsigned int has_bsd : 1;
126 unsigned int has_blt : 1;
127 unsigned int has_relaxed_fencing : 1;
128 unsigned int has_llc : 1;
129 unsigned int has_wait_timeout : 1;
130 unsigned int bo_reuse : 1;
131 unsigned int no_exec : 1;
132 unsigned int has_vebox : 1;
140 } drm_intel_bufmgr_gem;
142 #define DRM_INTEL_RELOC_FENCE (1<<0)
144 typedef struct _drm_intel_reloc_target_info {
147 } drm_intel_reloc_target;
149 struct _drm_intel_bo_gem {
157 * Kernel-assigned global name for this object
159 * List contains both flink named and prime fd'd objects
161 unsigned int global_name;
162 drmMMListHead name_list;
165 * Index of the buffer within the validation list while preparing a
166 * batchbuffer execution.
171 * Current tiling mode
173 uint32_t tiling_mode;
174 uint32_t swizzle_mode;
175 unsigned long stride;
179 /** Array passed to the DRM containing relocation information. */
180 struct drm_i915_gem_relocation_entry *relocs;
182 * Array of info structs corresponding to relocs[i].target_handle etc
184 drm_intel_reloc_target *reloc_target_info;
185 /** Number of entries in relocs */
187 /** Mapped address for the buffer, saved across map/unmap cycles */
189 /** GTT virtual address for the buffer, saved across map/unmap cycles */
192 * Virtual address of the buffer allocated by user, used for userptr
197 drmMMListHead vma_list;
203 * Boolean of whether this BO and its children have been included in
204 * the current drm_intel_bufmgr_check_aperture_space() total.
206 bool included_in_check_aperture;
209 * Boolean of whether this buffer has been used as a relocation
210 * target and had its size accounted for, and thus can't have any
211 * further relocations added to it.
213 bool used_as_reloc_target;
216 * Boolean of whether we have encountered an error whilst building the relocation tree.
221 * Boolean of whether this buffer can be re-used
226 * Boolean of whether the GPU is definitely not accessing the buffer.
228 * This is only valid when reusable, since non-reusable
229 * buffers are those that have been shared with other
230 * processes, so we don't know their state.
235 * Boolean of whether this buffer was allocated with userptr
240 * Size in bytes of this buffer and its relocation descendants.
242 * Used to avoid costly tree walking in
243 * drm_intel_bufmgr_check_aperture in the common case.
248 * Number of potential fence registers required by this buffer and its
251 int reloc_tree_fences;
253 /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
254 bool mapped_cpu_write;
258 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
261 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
264 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
265 uint32_t * swizzle_mode);
268 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
269 uint32_t tiling_mode,
272 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
275 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
277 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
280 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
281 uint32_t *tiling_mode)
283 unsigned long min_size, max_size;
286 if (*tiling_mode == I915_TILING_NONE)
289 /* 965+ just need multiples of page size for tiling */
290 if (bufmgr_gem->gen >= 4)
291 return ROUND_UP_TO(size, 4096);
293 /* Older chips need powers of two, of at least 512k or 1M */
294 if (bufmgr_gem->gen == 3) {
295 min_size = 1024*1024;
296 max_size = 128*1024*1024;
299 max_size = 64*1024*1024;
302 if (size > max_size) {
303 *tiling_mode = I915_TILING_NONE;
307 /* Do we need to allocate every page for the fence? */
308 if (bufmgr_gem->has_relaxed_fencing)
309 return ROUND_UP_TO(size, 4096);
311 for (i = min_size; i < size; i <<= 1)
318 * Round a given pitch up to the minimum required for X tiling on a
319 * given chip. We use 512 as the minimum to allow for a later tiling
323 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
324 unsigned long pitch, uint32_t *tiling_mode)
326 unsigned long tile_width;
329 /* If untiled, then just align it so that we can do rendering
330 * to it with the 3D engine.
332 if (*tiling_mode == I915_TILING_NONE)
333 return ALIGN(pitch, 64);
335 if (*tiling_mode == I915_TILING_X
336 || (IS_915(bufmgr_gem->pci_device)
337 && *tiling_mode == I915_TILING_Y))
342 /* 965 is flexible */
343 if (bufmgr_gem->gen >= 4)
344 return ROUND_UP_TO(pitch, tile_width);
346 /* The older hardware has a maximum pitch of 8192 with tiled
347 * surfaces, so fall back to untiled if it's too large.
350 *tiling_mode = I915_TILING_NONE;
351 return ALIGN(pitch, 64);
354 /* Pre-965 needs power of two tile width */
355 for (i = tile_width; i < pitch; i <<= 1)
361 static struct drm_intel_gem_bo_bucket *
362 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
367 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
368 struct drm_intel_gem_bo_bucket *bucket =
369 &bufmgr_gem->cache_bucket[i];
370 if (bucket->size >= size) {
379 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
383 for (i = 0; i < bufmgr_gem->exec_count; i++) {
384 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
385 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
387 if (bo_gem->relocs == NULL) {
388 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
393 for (j = 0; j < bo_gem->reloc_count; j++) {
394 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
395 drm_intel_bo_gem *target_gem =
396 (drm_intel_bo_gem *) target_bo;
398 DBG("%2d: %d (%s)@0x%08llx -> "
399 "%d (%s)@0x%08lx + 0x%08x\n",
401 bo_gem->gem_handle, bo_gem->name,
402 (unsigned long long)bo_gem->relocs[j].offset,
403 target_gem->gem_handle,
406 bo_gem->relocs[j].delta);
412 drm_intel_gem_bo_reference(drm_intel_bo *bo)
414 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
416 atomic_inc(&bo_gem->refcount);
420 * Adds the given buffer to the list of buffers to be validated (moved into the
421 * appropriate memory type) with the next batch submission.
423 * If a buffer is validated multiple times in a batch submission, it ends up
424 * with the intersection of the memory type flags and the union of the
428 drm_intel_add_validate_buffer(drm_intel_bo *bo)
430 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
431 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
434 if (bo_gem->validate_index != -1)
437 /* Extend the array of validation entries as necessary. */
438 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
439 int new_size = bufmgr_gem->exec_size * 2;
444 bufmgr_gem->exec_objects =
445 realloc(bufmgr_gem->exec_objects,
446 sizeof(*bufmgr_gem->exec_objects) * new_size);
447 bufmgr_gem->exec_bos =
448 realloc(bufmgr_gem->exec_bos,
449 sizeof(*bufmgr_gem->exec_bos) * new_size);
450 bufmgr_gem->exec_size = new_size;
453 index = bufmgr_gem->exec_count;
454 bo_gem->validate_index = index;
455 /* Fill in array entry */
456 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
457 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
458 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
459 bufmgr_gem->exec_objects[index].alignment = bo->align;
460 bufmgr_gem->exec_objects[index].offset = 0;
461 bufmgr_gem->exec_bos[index] = bo;
462 bufmgr_gem->exec_count++;
466 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
468 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
469 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
472 if (bo_gem->validate_index != -1) {
474 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
475 EXEC_OBJECT_NEEDS_FENCE;
479 /* Extend the array of validation entries as necessary. */
480 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
481 int new_size = bufmgr_gem->exec_size * 2;
486 bufmgr_gem->exec2_objects =
487 realloc(bufmgr_gem->exec2_objects,
488 sizeof(*bufmgr_gem->exec2_objects) * new_size);
489 bufmgr_gem->exec_bos =
490 realloc(bufmgr_gem->exec_bos,
491 sizeof(*bufmgr_gem->exec_bos) * new_size);
492 bufmgr_gem->exec_size = new_size;
495 index = bufmgr_gem->exec_count;
496 bo_gem->validate_index = index;
497 /* Fill in array entry */
498 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
499 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
500 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
501 bufmgr_gem->exec2_objects[index].alignment = bo->align;
502 bufmgr_gem->exec2_objects[index].offset = 0;
503 bufmgr_gem->exec_bos[index] = bo;
504 bufmgr_gem->exec2_objects[index].flags = 0;
505 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
506 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
508 bufmgr_gem->exec2_objects[index].flags |=
509 EXEC_OBJECT_NEEDS_FENCE;
511 bufmgr_gem->exec_count++;
514 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
518 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
519 drm_intel_bo_gem *bo_gem,
520 unsigned int alignment)
524 assert(!bo_gem->used_as_reloc_target);
526 /* The older chipsets are far less flexible in terms of tiling,
527 * and require tiled buffers to be size-aligned in the aperture.
528 * This means that in the worst possible case we will need a hole
529 * twice as large as the object in order for it to fit into the
530 * aperture. Optimal packing is for wimps.
532 size = bo_gem->bo.size;
533 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
534 unsigned int min_size;
536 if (bufmgr_gem->has_relaxed_fencing) {
537 if (bufmgr_gem->gen == 3)
538 min_size = 1024*1024;
542 while (min_size < size)
547 /* Account for worst-case alignment. */
548 alignment = MAX2(alignment, min_size);
551 bo_gem->reloc_tree_size = size + alignment;
555 drm_intel_setup_reloc_list(drm_intel_bo *bo)
557 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
558 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
559 unsigned int max_relocs = bufmgr_gem->max_relocs;
561 if (bo->size / 4 < max_relocs)
562 max_relocs = bo->size / 4;
564 bo_gem->relocs = malloc(max_relocs *
565 sizeof(struct drm_i915_gem_relocation_entry));
566 bo_gem->reloc_target_info = malloc(max_relocs *
567 sizeof(drm_intel_reloc_target));
568 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
569 bo_gem->has_error = true;
571 free (bo_gem->relocs);
572 bo_gem->relocs = NULL;
574 free (bo_gem->reloc_target_info);
575 bo_gem->reloc_target_info = NULL;
584 drm_intel_gem_bo_busy(drm_intel_bo *bo)
586 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
587 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
588 struct drm_i915_gem_busy busy;
591 if (bo_gem->reusable && bo_gem->idle)
595 busy.handle = bo_gem->gem_handle;
597 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
599 bo_gem->idle = !busy.busy;
604 return (ret == 0 && busy.busy);
608 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
609 drm_intel_bo_gem *bo_gem, int state)
611 struct drm_i915_gem_madvise madv;
614 madv.handle = bo_gem->gem_handle;
617 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
619 return madv.retained;
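/* Usage note (a sketch mirroring the cache code below): idle cached BOs
 * are marked I915_MADV_DONTNEED so the kernel may reclaim their pages
 * under memory pressure; before a cached BO is handed back out it is
 * marked I915_MADV_WILLNEED, and madv.retained tells us whether the
 * backing store survived:
 *
 *	if (!drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
 *					       I915_MADV_WILLNEED))
 *		drm_intel_gem_bo_free(&bo_gem->bo);
 */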
623 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
625 return drm_intel_gem_bo_madvise_internal
626 ((drm_intel_bufmgr_gem *) bo->bufmgr,
627 (drm_intel_bo_gem *) bo,
631 /* drop the oldest entries that have been purged by the kernel */
633 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
634 struct drm_intel_gem_bo_bucket *bucket)
636 while (!DRMLISTEMPTY(&bucket->head)) {
637 drm_intel_bo_gem *bo_gem;
639 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
640 bucket->head.next, head);
641 if (drm_intel_gem_bo_madvise_internal
642 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
645 DRMLISTDEL(&bo_gem->head);
646 drm_intel_gem_bo_free(&bo_gem->bo);
650 static drm_intel_bo *
651 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
655 uint32_t tiling_mode,
656 unsigned long stride,
657 unsigned int alignment)
659 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
660 drm_intel_bo_gem *bo_gem;
661 unsigned int page_size = getpagesize();
663 struct drm_intel_gem_bo_bucket *bucket;
664 bool alloc_from_cache;
665 unsigned long bo_size;
666 bool for_render = false;
668 if (flags & BO_ALLOC_FOR_RENDER)
671 /* Round the allocated size up to a power of two number of pages. */
672 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
674 /* If we don't have caching at this size, don't actually round the
677 if (bucket == NULL) {
679 if (bo_size < page_size)
682 bo_size = bucket->size;
685 pthread_mutex_lock(&bufmgr_gem->lock);
686 /* Get a buffer out of the cache if available */
688 alloc_from_cache = false;
689 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
691 /* Allocate new render-target BOs from the tail (MRU)
692 * of the list, as it will likely be hot in the GPU
693 * cache and in the aperture for us.
695 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
696 bucket->head.prev, head);
697 DRMLISTDEL(&bo_gem->head);
698 alloc_from_cache = true;
699 bo_gem->bo.align = alignment;
701 assert(alignment == 0);
702 /* For non-render-target BOs (where we're probably
703 * going to map it first thing in order to fill it
704 * with data), check if the last BO in the cache is
705 * unbusy, and only reuse in that case. Otherwise,
706 * allocating a new buffer is probably faster than
707 * waiting for the GPU to finish.
709 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
710 bucket->head.next, head);
711 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
712 alloc_from_cache = true;
713 DRMLISTDEL(&bo_gem->head);
717 if (alloc_from_cache) {
718 if (!drm_intel_gem_bo_madvise_internal
719 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
720 drm_intel_gem_bo_free(&bo_gem->bo);
721 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
726 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
729 drm_intel_gem_bo_free(&bo_gem->bo);
734 pthread_mutex_unlock(&bufmgr_gem->lock);
736 if (!alloc_from_cache) {
737 struct drm_i915_gem_create create;
739 bo_gem = calloc(1, sizeof(*bo_gem));
743 bo_gem->bo.size = bo_size;
746 create.size = bo_size;
748 ret = drmIoctl(bufmgr_gem->fd,
749 DRM_IOCTL_I915_GEM_CREATE,
751 bo_gem->gem_handle = create.handle;
752 bo_gem->bo.handle = bo_gem->gem_handle;
757 bo_gem->bo.bufmgr = bufmgr;
758 bo_gem->bo.align = alignment;
760 bo_gem->tiling_mode = I915_TILING_NONE;
761 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
764 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
765 list (vma_list), so set the list heads here first */
766 DRMINITLISTHEAD(&bo_gem->name_list);
767 DRMINITLISTHEAD(&bo_gem->vma_list);
768 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
771 drm_intel_gem_bo_free(&bo_gem->bo);
777 atomic_set(&bo_gem->refcount, 1);
778 bo_gem->validate_index = -1;
779 bo_gem->reloc_tree_fences = 0;
780 bo_gem->used_as_reloc_target = false;
781 bo_gem->has_error = false;
782 bo_gem->reusable = true;
784 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
786 DBG("bo_create: buf %d (%s) %ldb\n",
787 bo_gem->gem_handle, bo_gem->name, size);
792 static drm_intel_bo *
793 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
796 unsigned int alignment)
798 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
804 static drm_intel_bo *
805 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
808 unsigned int alignment)
810 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
811 I915_TILING_NONE, 0, 0);
814 static drm_intel_bo *
815 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
816 int x, int y, int cpp, uint32_t *tiling_mode,
817 unsigned long *pitch, unsigned long flags)
819 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
820 unsigned long size, stride;
824 unsigned long aligned_y, height_alignment;
826 tiling = *tiling_mode;
828 /* If we're tiled, our allocations are in 8 or 32-row blocks,
829 * so failure to align our height means that we won't allocate
832 * If we're untiled, we still have to align to 2 rows high
833 * because the data port accesses 2x2 blocks even if the
834 * bottom row isn't to be rendered, so failure to align means
835 * we could walk off the end of the GTT and fault. This is
836 * documented on 965, and may be the case on older chipsets
837 * too so we try to be careful.
840 height_alignment = 2;
842 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
843 height_alignment = 16;
844 else if (tiling == I915_TILING_X
845 || (IS_915(bufmgr_gem->pci_device)
846 && tiling == I915_TILING_Y))
847 height_alignment = 8;
848 else if (tiling == I915_TILING_Y)
849 height_alignment = 32;
850 aligned_y = ALIGN(y, height_alignment);
853 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
854 size = stride * aligned_y;
855 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
856 } while (*tiling_mode != tiling);
859 if (tiling == I915_TILING_NONE)
862 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
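/* Illustrative caller usage (a sketch; assumes a bufmgr from
 * drm_intel_bufmgr_gem_init() and omits error checking):
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *linear, *tiled;
 *
 *	linear = drm_intel_bo_alloc(bufmgr, "scratch", 64 * 1024, 0);
 *	tiled = drm_intel_bo_alloc_tiled(bufmgr, "fb", 1024, 768, 4,
 *					 &tiling, &pitch, 0);
 *
 * On return, tiling may have been demoted to I915_TILING_NONE and pitch
 * holds the stride chosen by the rounding helpers above.
 */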
866 static drm_intel_bo *
867 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
870 uint32_t tiling_mode,
875 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
876 drm_intel_bo_gem *bo_gem;
878 struct drm_i915_gem_userptr userptr;
880 /* Tiling with userptr surfaces is not supported
881 * on all hardware, so refuse it for the time being.
883 if (tiling_mode != I915_TILING_NONE)
886 bo_gem = calloc(1, sizeof(*bo_gem));
890 bo_gem->bo.size = size;
893 userptr.user_ptr = (__u64)((unsigned long)addr);
894 userptr.user_size = size;
895 userptr.flags = flags;
897 ret = drmIoctl(bufmgr_gem->fd,
898 DRM_IOCTL_I915_GEM_USERPTR,
901 DBG("bo_create_userptr: "
902 "ioctl failed with user ptr %p size 0x%lx, "
903 "user flags 0x%lx\n", addr, size, flags);
908 bo_gem->gem_handle = userptr.handle;
909 bo_gem->bo.handle = bo_gem->gem_handle;
910 bo_gem->bo.bufmgr = bufmgr;
911 bo_gem->is_userptr = true;
912 bo_gem->bo.virtual = addr;
913 /* Save the address provided by user */
914 bo_gem->user_virtual = addr;
915 bo_gem->tiling_mode = I915_TILING_NONE;
916 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
919 DRMINITLISTHEAD(&bo_gem->name_list);
920 DRMINITLISTHEAD(&bo_gem->vma_list);
923 atomic_set(&bo_gem->refcount, 1);
924 bo_gem->validate_index = -1;
925 bo_gem->reloc_tree_fences = 0;
926 bo_gem->used_as_reloc_target = false;
927 bo_gem->has_error = false;
928 bo_gem->reusable = false;
930 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
932 DBG("bo_create_userptr: "
933 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
934 addr, bo_gem->gem_handle, bo_gem->name,
935 size, stride, tiling_mode);
941 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
946 struct drm_i915_gem_userptr userptr;
948 pgsz = sysconf(_SC_PAGESIZE);
951 ret = posix_memalign(&ptr, pgsz, pgsz);
953 DBG("Failed to get a page (%ld) for userptr detection!\n",
959 userptr.user_ptr = (__u64)(unsigned long)ptr;
960 userptr.user_size = pgsz;
963 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
965 if (errno == ENODEV && userptr.flags == 0) {
966 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
973 /* We don't release the userptr bo here as we want to keep the
974 * kernel mm tracking alive for our lifetime. The first time we
975 * create a userptr object, the kernel has to install an mmu_notifier
976 * which is a heavyweight operation (e.g. it requires taking all
977 * mm_locks and stop_machine()).
980 bufmgr_gem->userptr_active.ptr = ptr;
981 bufmgr_gem->userptr_active.handle = userptr.handle;
986 static drm_intel_bo *
987 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
990 uint32_t tiling_mode,
995 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
996 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
998 bufmgr->bo_alloc_userptr = NULL;
1000 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1001 tiling_mode, stride, size, flags);
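/* The first call through bufmgr->bo_alloc_userptr lands here, probes the
 * kernel once via has_userptr(), and rebinds the vtable entry so later
 * calls go straight to drm_intel_gem_bo_alloc_userptr() (or fail with
 * NULL when the kernel lacks support).  Illustrative caller-side sketch
 * (userptr requires a page-aligned address and size):
 *
 *	void *ptr;
 *	posix_memalign(&ptr, 4096, 64 * 4096);
 *	drm_intel_bo *bo = drm_intel_bo_alloc_userptr(bufmgr, "userptr",
 *						      ptr, I915_TILING_NONE,
 *						      0, 64 * 4096, 0);
 */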
1005 * Returns a drm_intel_bo wrapping the given buffer object handle.
1007 * This can be used when one application needs to pass a buffer object
1011 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1013 unsigned int handle)
1015 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1016 drm_intel_bo_gem *bo_gem;
1018 struct drm_gem_open open_arg;
1019 struct drm_i915_gem_get_tiling get_tiling;
1020 drmMMListHead *list;
1022 /* At the moment most applications only have a few named bos.
1023 * For instance, in a DRI client only the render buffers passed
1024 * between X and the client are named. And since X returns the
1025 * alternating names for the front/back buffer, a linear search
1026 * provides a sufficiently fast match.
1028 pthread_mutex_lock(&bufmgr_gem->lock);
1029 for (list = bufmgr_gem->named.next;
1030 list != &bufmgr_gem->named;
1031 list = list->next) {
1032 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1033 if (bo_gem->global_name == handle) {
1034 drm_intel_gem_bo_reference(&bo_gem->bo);
1035 pthread_mutex_unlock(&bufmgr_gem->lock);
1041 open_arg.name = handle;
1042 ret = drmIoctl(bufmgr_gem->fd,
1046 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1047 name, handle, strerror(errno));
1048 pthread_mutex_unlock(&bufmgr_gem->lock);
1051 /* Now see if someone has used a prime handle to get this
1052 * object from the kernel before by looking through the list
1053 * again for a matching gem_handle
1055 for (list = bufmgr_gem->named.next;
1056 list != &bufmgr_gem->named;
1057 list = list->next) {
1058 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
1059 if (bo_gem->gem_handle == open_arg.handle) {
1060 drm_intel_gem_bo_reference(&bo_gem->bo);
1061 pthread_mutex_unlock(&bufmgr_gem->lock);
1066 bo_gem = calloc(1, sizeof(*bo_gem));
1068 pthread_mutex_unlock(&bufmgr_gem->lock);
1072 bo_gem->bo.size = open_arg.size;
1073 bo_gem->bo.offset = 0;
1074 bo_gem->bo.offset64 = 0;
1075 bo_gem->bo.virtual = NULL;
1076 bo_gem->bo.bufmgr = bufmgr;
1077 bo_gem->name = name;
1078 atomic_set(&bo_gem->refcount, 1);
1079 bo_gem->validate_index = -1;
1080 bo_gem->gem_handle = open_arg.handle;
1081 bo_gem->bo.handle = open_arg.handle;
1082 bo_gem->global_name = handle;
1083 bo_gem->reusable = false;
1085 memclear(get_tiling);
1086 get_tiling.handle = bo_gem->gem_handle;
1087 ret = drmIoctl(bufmgr_gem->fd,
1088 DRM_IOCTL_I915_GEM_GET_TILING,
1091 drm_intel_gem_bo_unreference(&bo_gem->bo);
1092 pthread_mutex_unlock(&bufmgr_gem->lock);
1095 bo_gem->tiling_mode = get_tiling.tiling_mode;
1096 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1097 /* XXX stride is unknown */
1098 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1100 DRMINITLISTHEAD(&bo_gem->vma_list);
1101 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1102 pthread_mutex_unlock(&bufmgr_gem->lock);
1103 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1109 drm_intel_gem_bo_free(drm_intel_bo *bo)
1111 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1112 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1113 struct drm_gem_close close;
1116 DRMLISTDEL(&bo_gem->vma_list);
1117 if (bo_gem->mem_virtual) {
1118 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1119 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1120 bufmgr_gem->vma_count--;
1122 if (bo_gem->gtt_virtual) {
1123 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1124 bufmgr_gem->vma_count--;
1127 /* Close this object */
1129 close.handle = bo_gem->gem_handle;
1130 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1132 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1133 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1139 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1142 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1144 if (bo_gem->mem_virtual)
1145 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1147 if (bo_gem->gtt_virtual)
1148 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1152 /** Frees all cached buffers significantly older than @time. */
1154 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1158 if (bufmgr_gem->time == time)
1161 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1162 struct drm_intel_gem_bo_bucket *bucket =
1163 &bufmgr_gem->cache_bucket[i];
1165 while (!DRMLISTEMPTY(&bucket->head)) {
1166 drm_intel_bo_gem *bo_gem;
1168 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1169 bucket->head.next, head);
1170 if (time - bo_gem->free_time <= 1)
1173 DRMLISTDEL(&bo_gem->head);
1175 drm_intel_gem_bo_free(&bo_gem->bo);
1179 bufmgr_gem->time = time;
1182 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1186 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1187 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1189 if (bufmgr_gem->vma_max < 0)
1192 /* We may need to evict a few entries in order to create new mmaps */
1193 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1197 while (bufmgr_gem->vma_count > limit) {
1198 drm_intel_bo_gem *bo_gem;
1200 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1201 bufmgr_gem->vma_cache.next,
1203 assert(bo_gem->map_count == 0);
1204 DRMLISTDELINIT(&bo_gem->vma_list);
1206 if (bo_gem->mem_virtual) {
1207 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1208 bo_gem->mem_virtual = NULL;
1209 bufmgr_gem->vma_count--;
1211 if (bo_gem->gtt_virtual) {
1212 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1213 bo_gem->gtt_virtual = NULL;
1214 bufmgr_gem->vma_count--;
1219 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1220 drm_intel_bo_gem *bo_gem)
1222 bufmgr_gem->vma_open--;
1223 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1224 if (bo_gem->mem_virtual)
1225 bufmgr_gem->vma_count++;
1226 if (bo_gem->gtt_virtual)
1227 bufmgr_gem->vma_count++;
1228 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1231 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1232 drm_intel_bo_gem *bo_gem)
1234 bufmgr_gem->vma_open++;
1235 DRMLISTDEL(&bo_gem->vma_list);
1236 if (bo_gem->mem_virtual)
1237 bufmgr_gem->vma_count--;
1238 if (bo_gem->gtt_virtual)
1239 bufmgr_gem->vma_count--;
1240 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1244 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1246 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1247 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1248 struct drm_intel_gem_bo_bucket *bucket;
1251 /* Unreference all the target buffers */
1252 for (i = 0; i < bo_gem->reloc_count; i++) {
1253 if (bo_gem->reloc_target_info[i].bo != bo) {
1254 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1255 reloc_target_info[i].bo,
1259 bo_gem->reloc_count = 0;
1260 bo_gem->used_as_reloc_target = false;
1262 DBG("bo_unreference final: %d (%s)\n",
1263 bo_gem->gem_handle, bo_gem->name);
1265 /* release memory associated with this object */
1266 if (bo_gem->reloc_target_info) {
1267 free(bo_gem->reloc_target_info);
1268 bo_gem->reloc_target_info = NULL;
1270 if (bo_gem->relocs) {
1271 free(bo_gem->relocs);
1272 bo_gem->relocs = NULL;
1275 /* Clear any left-over mappings */
1276 if (bo_gem->map_count) {
1277 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1278 bo_gem->map_count = 0;
1279 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1280 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1283 DRMLISTDEL(&bo_gem->name_list);
1285 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1286 /* Put the buffer into our internal cache for reuse if we can. */
1287 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1288 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1289 I915_MADV_DONTNEED)) {
1290 bo_gem->free_time = time;
1292 bo_gem->name = NULL;
1293 bo_gem->validate_index = -1;
1295 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1297 drm_intel_gem_bo_free(bo);
1301 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1304 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1306 assert(atomic_read(&bo_gem->refcount) > 0);
1307 if (atomic_dec_and_test(&bo_gem->refcount))
1308 drm_intel_gem_bo_unreference_final(bo, time);
1311 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1313 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1315 assert(atomic_read(&bo_gem->refcount) > 0);
1317 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1318 drm_intel_bufmgr_gem *bufmgr_gem =
1319 (drm_intel_bufmgr_gem *) bo->bufmgr;
1320 struct timespec time;
1322 clock_gettime(CLOCK_MONOTONIC, &time);
1324 pthread_mutex_lock(&bufmgr_gem->lock);
1326 if (atomic_dec_and_test(&bo_gem->refcount)) {
1327 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1328 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1331 pthread_mutex_unlock(&bufmgr_gem->lock);
1335 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1337 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1338 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1339 struct drm_i915_gem_set_domain set_domain;
1342 if (bo_gem->is_userptr) {
1343 /* Return the same user ptr */
1344 bo->virtual = bo_gem->user_virtual;
1348 pthread_mutex_lock(&bufmgr_gem->lock);
1350 if (bo_gem->map_count++ == 0)
1351 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1353 if (!bo_gem->mem_virtual) {
1354 struct drm_i915_gem_mmap mmap_arg;
1356 DBG("bo_map: %d (%s), map_count=%d\n",
1357 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1360 mmap_arg.handle = bo_gem->gem_handle;
1361 mmap_arg.size = bo->size;
1362 ret = drmIoctl(bufmgr_gem->fd,
1363 DRM_IOCTL_I915_GEM_MMAP,
1367 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1368 __FILE__, __LINE__, bo_gem->gem_handle,
1369 bo_gem->name, strerror(errno));
1370 if (--bo_gem->map_count == 0)
1371 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1372 pthread_mutex_unlock(&bufmgr_gem->lock);
1375 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1376 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1378 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1379 bo_gem->mem_virtual);
1380 bo->virtual = bo_gem->mem_virtual;
1382 memclear(set_domain);
1383 set_domain.handle = bo_gem->gem_handle;
1384 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1386 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1388 set_domain.write_domain = 0;
1389 ret = drmIoctl(bufmgr_gem->fd,
1390 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1393 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1394 __FILE__, __LINE__, bo_gem->gem_handle,
1399 bo_gem->mapped_cpu_write = true;
1401 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1402 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1403 pthread_mutex_unlock(&bufmgr_gem->lock);
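/* Illustrative caller usage of the CPU mapping path (a sketch):
 *
 *	int write_enable = 1;
 *	if (drm_intel_bo_map(bo, write_enable) == 0) {
 *		memcpy(bo->virtual, data, size);
 *		drm_intel_bo_unmap(bo);
 *	}
 *
 * The mapping is cached in bo_gem->mem_virtual, so repeated map/unmap
 * cycles reuse the same CPU VMA until the vma cache evicts it.
 */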
1409 map_gtt(drm_intel_bo *bo)
1411 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1412 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1415 if (bo_gem->is_userptr)
1418 if (bo_gem->map_count++ == 0)
1419 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1421 /* Get a mapping of the buffer if we haven't before. */
1422 if (bo_gem->gtt_virtual == NULL) {
1423 struct drm_i915_gem_mmap_gtt mmap_arg;
1425 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1426 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1429 mmap_arg.handle = bo_gem->gem_handle;
1431 /* Get the fake offset back... */
1432 ret = drmIoctl(bufmgr_gem->fd,
1433 DRM_IOCTL_I915_GEM_MMAP_GTT,
1437 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1439 bo_gem->gem_handle, bo_gem->name,
1441 if (--bo_gem->map_count == 0)
1442 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1447 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1448 MAP_SHARED, bufmgr_gem->fd,
1450 if (bo_gem->gtt_virtual == MAP_FAILED) {
1451 bo_gem->gtt_virtual = NULL;
1453 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1455 bo_gem->gem_handle, bo_gem->name,
1457 if (--bo_gem->map_count == 0)
1458 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1463 bo->virtual = bo_gem->gtt_virtual;
1465 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1466 bo_gem->gtt_virtual);
1472 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1474 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1475 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1476 struct drm_i915_gem_set_domain set_domain;
1479 pthread_mutex_lock(&bufmgr_gem->lock);
1483 pthread_mutex_unlock(&bufmgr_gem->lock);
1487 /* Now move it to the GTT domain so that the GPU and CPU
1488 * caches are flushed and the GPU isn't actively using the
1491 * The pagefault handler does this domain change for us when
1492 * it has unbound the BO from the GTT, but it's up to us to
1493 * tell it when we're about to use things if we had done
1494 * rendering and it still happens to be bound to the GTT.
1496 memclear(set_domain);
1497 set_domain.handle = bo_gem->gem_handle;
1498 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1499 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1500 ret = drmIoctl(bufmgr_gem->fd,
1501 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1504 DBG("%s:%d: Error setting domain %d: %s\n",
1505 __FILE__, __LINE__, bo_gem->gem_handle,
1509 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1510 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1511 pthread_mutex_unlock(&bufmgr_gem->lock);
1517 * Performs a mapping of the buffer object like the normal GTT
1518 * mapping, but avoids waiting for the GPU to be done reading from or
1519 * rendering to the buffer.
1521 * This is used in the implementation of GL_ARB_map_buffer_range: The
1522 * user asks to create a buffer, then does a mapping, fills some
1523 * space, runs a drawing command, then asks to map it again without
1524 * synchronizing because it guarantees that it won't write over the
1525 * data that the GPU is busy using (or, more specifically, that if it
1526 * does write over the data, it acknowledges that rendering is
1531 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1533 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1534 #ifdef HAVE_VALGRIND
1535 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1539 /* If the CPU cache isn't coherent with the GTT, then use a
1540 * regular synchronized mapping. The problem is that we don't
1541 * track where the buffer was last used on the CPU side in
1542 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1543 * we would potentially corrupt the buffer even when the user
1544 * does reasonable things.
1546 if (!bufmgr_gem->has_llc)
1547 return drm_intel_gem_bo_map_gtt(bo);
1549 pthread_mutex_lock(&bufmgr_gem->lock);
1553 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1554 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1557 pthread_mutex_unlock(&bufmgr_gem->lock);
1562 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1564 drm_intel_bufmgr_gem *bufmgr_gem;
1565 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1571 if (bo_gem->is_userptr)
1574 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1576 pthread_mutex_lock(&bufmgr_gem->lock);
1578 if (bo_gem->map_count <= 0) {
1579 DBG("attempted to unmap an unmapped bo\n");
1580 pthread_mutex_unlock(&bufmgr_gem->lock);
1581 /* Preserve the old behaviour of just treating this as a
1582 * no-op rather than reporting the error.
1587 if (bo_gem->mapped_cpu_write) {
1588 struct drm_i915_gem_sw_finish sw_finish;
1590 /* Cause a flush to happen if the buffer's pinned for
1591 * scanout, so the results show up in a timely manner.
1592 * Unlike GTT set domains, this only does work if the
1593 * buffer should be scanout-related.
1595 memclear(sw_finish);
1596 sw_finish.handle = bo_gem->gem_handle;
1597 ret = drmIoctl(bufmgr_gem->fd,
1598 DRM_IOCTL_I915_GEM_SW_FINISH,
1600 ret = ret == -1 ? -errno : 0;
1602 bo_gem->mapped_cpu_write = false;
1605 /* We need to unmap after every invocation as we cannot track
1606 * an open vma for every bo as that will exhaust the system
1607 * limits and cause later failures.
1609 if (--bo_gem->map_count == 0) {
1610 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1611 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1614 pthread_mutex_unlock(&bufmgr_gem->lock);
1620 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1622 return drm_intel_gem_bo_unmap(bo);
1626 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1627 unsigned long size, const void *data)
1629 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1630 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1631 struct drm_i915_gem_pwrite pwrite;
1634 if (bo_gem->is_userptr)
1638 pwrite.handle = bo_gem->gem_handle;
1639 pwrite.offset = offset;
1641 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1642 ret = drmIoctl(bufmgr_gem->fd,
1643 DRM_IOCTL_I915_GEM_PWRITE,
1647 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1648 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1649 (int)size, strerror(errno));
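/* Illustrative use of the pwrite/pread paths (a sketch;
 * drm_intel_bo_get_subdata is defined below): small one-shot transfers
 * can skip mapping the object entirely:
 *
 *	uint32_t header[4] = { 0, 0, 0, 0 };
 *	drm_intel_bo_subdata(bo, 0, sizeof(header), header);
 *	drm_intel_bo_get_subdata(bo, 0, sizeof(header), header);
 *
 * Both return 0 on success or -errno on failure.
 */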
1656 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1658 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1659 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1662 memclear(get_pipe_from_crtc_id);
1663 get_pipe_from_crtc_id.crtc_id = crtc_id;
1664 ret = drmIoctl(bufmgr_gem->fd,
1665 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1666 &get_pipe_from_crtc_id);
1668 /* We return -1 here to signal that we don't
1669 * know which pipe is associated with this crtc.
1670 * This lets the caller know that this information
1671 * isn't available; using the wrong pipe for
1672 * vblank waiting can cause the chipset to lock up
1677 return get_pipe_from_crtc_id.pipe;
1681 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1682 unsigned long size, void *data)
1684 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1685 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1686 struct drm_i915_gem_pread pread;
1689 if (bo_gem->is_userptr)
1693 pread.handle = bo_gem->gem_handle;
1694 pread.offset = offset;
1696 pread.data_ptr = (uint64_t) (uintptr_t) data;
1697 ret = drmIoctl(bufmgr_gem->fd,
1698 DRM_IOCTL_I915_GEM_PREAD,
1702 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1703 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1704 (int)size, strerror(errno));
1710 /** Waits for all GPU rendering with the object to have completed. */
1712 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1714 drm_intel_gem_bo_start_gtt_access(bo, 1);
1718 * Waits on a BO for the given amount of time.
1720 * @bo: buffer object to wait for
1721 * @timeout_ns: amount of time to wait in nanoseconds.
1722 * If value is less than 0, an infinite wait will occur.
1724 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1725 * object has completed within the allotted time. Otherwise some negative return
1726 * value describes the error. Of particular interest is -ETIME when the wait has
1727 * failed to yield the desired result.
1729 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1730 * the operation to give up after a certain amount of time. Another subtle
1731 * difference is the internal locking semantics are different (this variant does
1732 * not hold the lock for the duration of the wait). This makes the wait subject
1733 * to a larger userspace race window.
1735 * The implementation shall wait until the object is no longer actively
1736 * referenced within a batch buffer at the time of the call. The wait will
1737 * not guarantee that the buffer is re-issued via another thread, or a flinked
1738 * handle. Userspace must make sure this race does not occur if such precision
1741 * Note that some kernels have broken the infinite-wait-for-negative-values
1742 * promise; upgrade to the latest stable kernel if this is the case.
1745 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1747 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1748 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1749 struct drm_i915_gem_wait wait;
1752 if (!bufmgr_gem->has_wait_timeout) {
1753 DBG("%s:%d: Timed wait is not supported. Falling back to "
1754 "infinite wait\n", __FILE__, __LINE__);
1756 drm_intel_gem_bo_wait_rendering(bo);
1759 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1764 wait.bo_handle = bo_gem->gem_handle;
1765 wait.timeout_ns = timeout_ns;
1766 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
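/* Illustrative use (a sketch): wait at most one second for the last
 * batch referencing bo to retire, treating -ETIME as "still busy"
 * (handle_still_busy is a hypothetical caller helper):
 *
 *	int64_t one_second = 1000000000;
 *	int ret = drm_intel_gem_bo_wait(bo, one_second);
 *	if (ret == -ETIME)
 *		handle_still_busy(bo);
 *
 * A negative timeout requests an infinite wait, subject to the kernel
 * caveat noted above.
 */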
1774 * Sets the object to the GTT read and possibly write domain, used by the X
1775 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1777 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1778 * can do tiled pixmaps this way.
1781 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1783 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1784 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1785 struct drm_i915_gem_set_domain set_domain;
1788 memclear(set_domain);
1789 set_domain.handle = bo_gem->gem_handle;
1790 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1791 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1792 ret = drmIoctl(bufmgr_gem->fd,
1793 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1796 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1797 __FILE__, __LINE__, bo_gem->gem_handle,
1798 set_domain.read_domains, set_domain.write_domain,
1804 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1806 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1807 struct drm_gem_close close_bo;
1810 free(bufmgr_gem->exec2_objects);
1811 free(bufmgr_gem->exec_objects);
1812 free(bufmgr_gem->exec_bos);
1814 pthread_mutex_destroy(&bufmgr_gem->lock);
1816 /* Free any cached buffer objects we were going to reuse */
1817 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1818 struct drm_intel_gem_bo_bucket *bucket =
1819 &bufmgr_gem->cache_bucket[i];
1820 drm_intel_bo_gem *bo_gem;
1822 while (!DRMLISTEMPTY(&bucket->head)) {
1823 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1824 bucket->head.next, head);
1825 DRMLISTDEL(&bo_gem->head);
1827 drm_intel_gem_bo_free(&bo_gem->bo);
1831 /* Release userptr bo kept hanging around for optimisation. */
1832 if (bufmgr_gem->userptr_active.ptr) {
1834 close_bo.handle = bufmgr_gem->userptr_active.handle;
1835 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1836 free(bufmgr_gem->userptr_active.ptr);
1839 "Failed to release test userptr object! (%d) "
1840 "i915 kernel driver may not be sane!\n", errno);
1847 * Adds the target buffer to the validation list and adds the relocation
1848 * to the reloc_buffer's relocation list.
1850 * The relocation entry at the given offset must already contain the
1851 * precomputed relocation value, because the kernel will optimize out
1852 * the relocation entry write when the buffer hasn't moved from the
1853 * last known offset in target_bo.
1856 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1857 drm_intel_bo *target_bo, uint32_t target_offset,
1858 uint32_t read_domains, uint32_t write_domain,
1861 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1862 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1863 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1864 bool fenced_command;
1866 if (bo_gem->has_error)
1869 if (target_bo_gem->has_error) {
1870 bo_gem->has_error = true;
1874 /* We never use HW fences for rendering on 965+ */
1875 if (bufmgr_gem->gen >= 4)
1878 fenced_command = need_fence;
1879 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1882 /* Create a new relocation list if needed */
1883 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1886 /* Check overflow */
1887 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1890 assert(offset <= bo->size - 4);
1891 assert((write_domain & (write_domain - 1)) == 0);
1893 /* An object needing a fence is a tiled buffer, so it won't have
1894 * relocs to other buffers.
1897 assert(target_bo_gem->reloc_count == 0);
1898 target_bo_gem->reloc_tree_fences = 1;
1901 /* Make sure that we're not adding a reloc to something whose size has
1902 * already been accounted for.
1904 assert(!bo_gem->used_as_reloc_target);
1905 if (target_bo_gem != bo_gem) {
1906 target_bo_gem->used_as_reloc_target = true;
1907 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1908 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1911 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1912 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1913 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1914 target_bo_gem->gem_handle;
1915 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1916 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1917 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
1919 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1920 if (target_bo != bo)
1921 drm_intel_gem_bo_reference(target_bo);
1923 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1924 DRM_INTEL_RELOC_FENCE;
1926 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1928 bo_gem->reloc_count++;
1934 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1935 drm_intel_bo *target_bo, uint32_t target_offset,
1936 uint32_t read_domains, uint32_t write_domain)
1938 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1940 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1941 read_domains, write_domain,
1942 !bufmgr_gem->fenced_relocs);
1946 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1947 drm_intel_bo *target_bo,
1948 uint32_t target_offset,
1949 uint32_t read_domains, uint32_t write_domain)
1951 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1952 read_domains, write_domain, true);
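/* Illustrative use (a sketch): a driver writes the presumed target
 * address into its batch and records the relocation so the kernel can
 * patch it if the target moves.  The offset and domains below are made
 * up for illustration:
 *
 *	uint32_t batch_offset = 4 * sizeof(uint32_t);
 *	drm_intel_bo_emit_reloc(batch_bo, batch_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */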
1956 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1958 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1960 return bo_gem->reloc_count;
1964 * Removes existing relocation entries in the BO after "start".
1966 * This allows a user to avoid a two-step process for state setup with
1967 * counting up all the buffer objects and doing a
1968 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1969 * relocations for the state setup. Instead, save the state of the
1970 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1971 * state, and then check if it still fits in the aperture.
1973 * Any further drm_intel_bufmgr_check_aperture_space() queries
1974 * involving this buffer in the tree are undefined after this call.
1977 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1979 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1980 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1982 struct timespec time;
1984 clock_gettime(CLOCK_MONOTONIC, &time);
1986 assert(bo_gem->reloc_count >= start);
1988 /* Unreference the cleared target buffers */
1989 pthread_mutex_lock(&bufmgr_gem->lock);
1991 for (i = start; i < bo_gem->reloc_count; i++) {
1992 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
1993 if (&target_bo_gem->bo != bo) {
1994 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
1995 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1999 bo_gem->reloc_count = start;
2001 pthread_mutex_unlock(&bufmgr_gem->lock);
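/* Illustrative use of the save/restore pattern described above (a
 * sketch; emit_state is a hypothetical caller function):
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 */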
2006 * Walk the tree of relocations rooted at BO and accumulate the list of
2007 * validations to be performed and update the relocation buffers with
2008 * index values into the validation list.
2011 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2013 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2016 if (bo_gem->relocs == NULL)
2019 for (i = 0; i < bo_gem->reloc_count; i++) {
2020 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2022 if (target_bo == bo)
2025 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2027 /* Continue walking the tree depth-first. */
2028 drm_intel_gem_bo_process_reloc(target_bo);
2030 /* Add the target to the validate list */
2031 drm_intel_add_validate_buffer(target_bo);
2036 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2038 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2041 if (bo_gem->relocs == NULL)
2044 for (i = 0; i < bo_gem->reloc_count; i++) {
2045 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2048 if (target_bo == bo)
2051 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2053 /* Continue walking the tree depth-first. */
2054 drm_intel_gem_bo_process_reloc2(target_bo);
2056 need_fence = (bo_gem->reloc_target_info[i].flags &
2057 DRM_INTEL_RELOC_FENCE);
2059 /* Add the target to the validate list */
2060 drm_intel_add_validate_buffer2(target_bo, need_fence);
2066 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2070 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2071 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2072 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2074 /* Update the buffer offset */
2075 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2076 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
2077 bo_gem->gem_handle, bo_gem->name, bo->offset64,
2078 (unsigned long long)bufmgr_gem->exec_objects[i].
2080 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2081 bo->offset = bufmgr_gem->exec_objects[i].offset;
2087 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2091 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2092 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2093 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2095 /* Update the buffer offset */
2096 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2097 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
2098 bo_gem->gem_handle, bo_gem->name, bo->offset64,
2099 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
2100 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2101 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2107 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2108 int x1, int y1, int width, int height,
2109 enum aub_dump_bmp_format format,
2110 int pitch, int offset)
2115 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2116 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2118 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2119 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2120 struct drm_i915_gem_execbuffer execbuf;
2123 if (bo_gem->has_error)
2126 pthread_mutex_lock(&bufmgr_gem->lock);
2127 /* Update indices and set up the validate list. */
2128 drm_intel_gem_bo_process_reloc(bo);
2130 /* Add the batch buffer to the validation list. There are no
2131 * relocations pointing to it.
2133 drm_intel_add_validate_buffer(bo);
2136 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2137 execbuf.buffer_count = bufmgr_gem->exec_count;
2138 execbuf.batch_start_offset = 0;
2139 execbuf.batch_len = used;
2140 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2141 execbuf.num_cliprects = num_cliprects;
2145 ret = drmIoctl(bufmgr_gem->fd,
2146 DRM_IOCTL_I915_GEM_EXECBUFFER,
2150 if (errno == ENOSPC) {
2151 DBG("Execbuffer fails to pin. "
2152 "Estimate: %u. Actual: %u. Available: %u\n",
2153 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2156 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2159 (unsigned int)bufmgr_gem->gtt_size);
2162 drm_intel_update_buffer_offsets(bufmgr_gem);
2164 if (bufmgr_gem->bufmgr.debug)
2165 drm_intel_gem_dump_validation_list(bufmgr_gem);
2167 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2168 bo_gem = (drm_intel_bo_gem *) bufmgr_gem->exec_bos[i];
2170 bo_gem->idle = false;
2172 /* Disconnect the buffer from the validate list */
2173 bo_gem->validate_index = -1;
2174 bufmgr_gem->exec_bos[i] = NULL;
2176 bufmgr_gem->exec_count = 0;
2177 pthread_mutex_unlock(&bufmgr_gem->lock);
2183 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2184 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2187 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2188 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2189 struct drm_i915_gem_execbuffer2 execbuf;
2193 if (bo_gem->has_error)
2196 switch (flags & 0x7) {
2200 if (!bufmgr_gem->has_blt)
2204 if (!bufmgr_gem->has_bsd)
2207 case I915_EXEC_VEBOX:
2208 if (!bufmgr_gem->has_vebox)
2211 case I915_EXEC_RENDER:
2212 case I915_EXEC_DEFAULT:
2216 pthread_mutex_lock(&bufmgr_gem->lock);
2217 /* Update indices and set up the validate list. */
2218 drm_intel_gem_bo_process_reloc2(bo);
2220 /* Add the batch buffer to the validation list. There are no relocations
2223 drm_intel_add_validate_buffer2(bo, 0);
2226 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2227 execbuf.buffer_count = bufmgr_gem->exec_count;
2228 execbuf.batch_start_offset = 0;
2229 execbuf.batch_len = used;
2230 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2231 execbuf.num_cliprects = num_cliprects;
2234 execbuf.flags = flags;
2236 i915_execbuffer2_set_context_id(execbuf, 0);
2238 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2241 if (bufmgr_gem->no_exec)
2242 goto skip_execution;
2244 ret = drmIoctl(bufmgr_gem->fd,
2245 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2249 if (ret == -ENOSPC) {
2250 DBG("Execbuffer fails to pin. "
2251 "Estimate: %u. Actual: %u. Available: %u\n",
2252 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2253 bufmgr_gem->exec_count),
2254 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2255 bufmgr_gem->exec_count),
2256 (unsigned int) bufmgr_gem->gtt_size);
2259 drm_intel_update_buffer_offsets2(bufmgr_gem);
2262 if (bufmgr_gem->bufmgr.debug)
2263 drm_intel_gem_dump_validation_list(bufmgr_gem);
2265 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2266 bo_gem = (drm_intel_bo_gem *) bufmgr_gem->exec_bos[i];
2268 bo_gem->idle = false;
2270 /* Disconnect the buffer from the validate list */
2271 bo_gem->validate_index = -1;
2272 bufmgr_gem->exec_bos[i] = NULL;
2274 bufmgr_gem->exec_count = 0;
2275 pthread_mutex_unlock(&bufmgr_gem->lock);
2281 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2282 drm_clip_rect_t *cliprects, int num_cliprects,
2285 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2290 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2291 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2294 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2299 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2300 int used, unsigned int flags)
2302 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
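/*
 * Illustrative sketch (not part of the original file): submitting a batch
 * either with a hardware context on the render ring, or on an explicit ring
 * via the multi-ring exec entry point.  bufmgr, batch_bo and used are
 * placeholders supplied by the caller.
 *
 *     drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *     if (ctx != NULL) {
 *             drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
 *                                           I915_EXEC_RENDER);
 *             drm_intel_gem_context_destroy(ctx);
 *     } else {
 *             drm_intel_bo_mrb_exec(batch_bo, used, NULL, 0, 0,
 *                                   I915_EXEC_BLT);
 *     }
 */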
2306 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2308 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2309 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2310 struct drm_i915_gem_pin pin;
2314 pin.handle = bo_gem->gem_handle;
2315 pin.alignment = alignment;
2317 ret = drmIoctl(bufmgr_gem->fd,
2318 DRM_IOCTL_I915_GEM_PIN,
2323 bo->offset64 = pin.offset;
2324 bo->offset = pin.offset;
2329 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2331 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2332 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2333 struct drm_i915_gem_unpin unpin;
2337 unpin.handle = bo_gem->gem_handle;
2339 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2347 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2348 uint32_t tiling_mode,
2351 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2352 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2353 struct drm_i915_gem_set_tiling set_tiling;
2356 if (bo_gem->global_name == 0 &&
2357 tiling_mode == bo_gem->tiling_mode &&
2358 stride == bo_gem->stride)
2361 memset(&set_tiling, 0, sizeof(set_tiling));
2363 /* set_tiling is slightly broken and overwrites the
2364 * input on the error path, so we have to open code drmIoctl.
2367 set_tiling.handle = bo_gem->gem_handle;
2368 set_tiling.tiling_mode = tiling_mode;
2369 set_tiling.stride = stride;
2371 ret = ioctl(bufmgr_gem->fd,
2372 DRM_IOCTL_I915_GEM_SET_TILING,
2374 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2378 bo_gem->tiling_mode = set_tiling.tiling_mode;
2379 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2380 bo_gem->stride = set_tiling.stride;
2385 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2388 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2389 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2392 /* Tiling with userptr surfaces is not supported
2393 * on all hardware, so refuse it for the time being.
2395 if (bo_gem->is_userptr)
2398 /* Linear buffers have no stride. By ensuring that we only ever use
2399 * stride 0 with linear buffers, we simplify our code.
2401 if (*tiling_mode == I915_TILING_NONE)
2404 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2406 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2408 *tiling_mode = bo_gem->tiling_mode;
2413 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2414 uint32_t * swizzle_mode)
2416 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2418 *tiling_mode = bo_gem->tiling_mode;
2419 *swizzle_mode = bo_gem->swizzle_mode;
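/*
 * Illustrative sketch (assumed caller code, not part of this file): asking
 * for an X-tiled buffer and checking what the kernel actually granted.  The
 * width/height/bpp values are placeholders.
 *
 *     uint32_t tiling = I915_TILING_X, swizzle;
 *     unsigned long pitch;
 *     drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "scanout",
 *                                                  1920, 1080, 4,
 *                                                  &tiling, &pitch, 0);
 *     // tiling is in/out: the kernel may fall back to I915_TILING_NONE.
 *     drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 */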
2424 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2426 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2429 drm_intel_bo_gem *bo_gem;
2430 struct drm_i915_gem_get_tiling get_tiling;
2431 drmMMListHead *list;
2433 pthread_mutex_lock(&bufmgr_gem->lock);
2434 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2436 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2437 pthread_mutex_unlock(&bufmgr_gem->lock);
2442 * See if the kernel has already returned this buffer to us. Just as
2443 * for named buffers, we must not create two bo's pointing at the same kernel object.
2446 for (list = bufmgr_gem->named.next;
2447 list != &bufmgr_gem->named;
2448 list = list->next) {
2449 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
2450 if (bo_gem->gem_handle == handle) {
2451 drm_intel_gem_bo_reference(&bo_gem->bo);
2452 pthread_mutex_unlock(&bufmgr_gem->lock);
2457 bo_gem = calloc(1, sizeof(*bo_gem));
2459 pthread_mutex_unlock(&bufmgr_gem->lock);
2462 /* Determine size of bo. The fd-to-handle ioctl really should
2463 * return the size, but it doesn't. If we have kernel 3.12 or
2464 * later, we can lseek on the prime fd to get the size. Older
2465 * kernels will just fail, in which case we fall back to the
2466 * provided (estimated or guessed) size. */
2467 ret = lseek(prime_fd, 0, SEEK_END);
2469 bo_gem->bo.size = ret;
2471 bo_gem->bo.size = size;
2473 bo_gem->bo.handle = handle;
2474 bo_gem->bo.bufmgr = bufmgr;
2476 bo_gem->gem_handle = handle;
2478 atomic_set(&bo_gem->refcount, 1);
2480 bo_gem->name = "prime";
2481 bo_gem->validate_index = -1;
2482 bo_gem->reloc_tree_fences = 0;
2483 bo_gem->used_as_reloc_target = false;
2484 bo_gem->has_error = false;
2485 bo_gem->reusable = false;
2487 DRMINITLISTHEAD(&bo_gem->vma_list);
2488 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2489 pthread_mutex_unlock(&bufmgr_gem->lock);
2491 memclear(get_tiling);
2492 get_tiling.handle = bo_gem->gem_handle;
2493 ret = drmIoctl(bufmgr_gem->fd,
2494 DRM_IOCTL_I915_GEM_GET_TILING,
2497 DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
2498 drm_intel_gem_bo_unreference(&bo_gem->bo);
2501 bo_gem->tiling_mode = get_tiling.tiling_mode;
2502 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2503 /* XXX stride is unknown */
2504 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2510 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2512 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2513 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2515 pthread_mutex_lock(&bufmgr_gem->lock);
2516 if (DRMLISTEMPTY(&bo_gem->name_list))
2517 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2518 pthread_mutex_unlock(&bufmgr_gem->lock);
2520 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2521 DRM_CLOEXEC, prime_fd) != 0)
2524 bo_gem->reusable = false;
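/*
 * Illustrative sketch: sharing a BO through PRIME/dma-buf file descriptors.
 * The exporting side obtains an fd; the importer (possibly another process
 * or API) wraps it back into a bo.  bo, bufmgr and the reuse of bo->size as
 * the import size are placeholders.
 *
 *     int fd;
 *     if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *             // ...hand fd to the consumer...
 *             drm_intel_bo *imported =
 *                     drm_intel_bo_gem_create_from_prime(bufmgr, fd, bo->size);
 *             drm_intel_bo_unreference(imported);
 *             close(fd);
 *     }
 */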
2530 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2532 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2533 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2536 if (!bo_gem->global_name) {
2537 struct drm_gem_flink flink;
2540 flink.handle = bo_gem->gem_handle;
2542 pthread_mutex_lock(&bufmgr_gem->lock);
2544 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2546 pthread_mutex_unlock(&bufmgr_gem->lock);
2550 bo_gem->global_name = flink.name;
2551 bo_gem->reusable = false;
2553 if (DRMLISTEMPTY(&bo_gem->name_list))
2554 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2555 pthread_mutex_unlock(&bufmgr_gem->lock);
2558 *name = bo_gem->global_name;
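/*
 * Illustrative sketch: sharing a BO by flink name, the older global-name
 * mechanism.  The name is visible system-wide, so the PRIME fd path above
 * is generally preferable where available.
 *
 *     uint32_t name;
 *     if (drm_intel_bo_flink(bo, &name) == 0) {
 *             // In another client on the same device:
 *             drm_intel_bo *shared =
 *                     drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *             drm_intel_bo_unreference(shared);
 *     }
 */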
2563 * Enables unlimited caching of buffer objects for reuse.
2565 * This is potentially very memory expensive, as the cache at each bucket
2566 * size is only bounded by how many buffers of that size we've managed to have
2567 * in flight at once.
2570 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2572 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2574 bufmgr_gem->bo_reuse = true;
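/*
 * Illustrative sketch: typical buffer manager setup.  Most users enable
 * reuse right after initialization; the batch size shown is a placeholder.
 *
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *     if (bufmgr != NULL)
 *             drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */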
2578 * Enable use of fenced reloc type.
2580 * New code should enable this to avoid unnecessary fence register
2581 * allocation. If this option is not enabled, all relocs will have a fence
2582 * register allocated.
2585 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2587 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2589 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2590 bufmgr_gem->fenced_relocs = true;
2594 * Return the additional aperture space required by the tree of buffer objects rooted at bo.
2598 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2600 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2604 if (bo == NULL || bo_gem->included_in_check_aperture)
2608 bo_gem->included_in_check_aperture = true;
2610 for (i = 0; i < bo_gem->reloc_count; i++)
2612 drm_intel_gem_bo_get_aperture_space(bo_gem->
2613 reloc_target_info[i].bo);
2619 * Count the number of buffers in this list that need a fence reg
2621 * If the count is greater than the number of available regs, we'll have
2622 * to ask the caller to resubmit a batch with fewer tiled buffers.
2624 * This function over-counts if the same buffer is used multiple times.
2627 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2630 unsigned int total = 0;
2632 for (i = 0; i < count; i++) {
2633 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2638 total += bo_gem->reloc_tree_fences;
2644 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2645 * for the next drm_intel_bufmgr_check_aperture_space() call.
2648 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2650 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2653 if (bo == NULL || !bo_gem->included_in_check_aperture)
2656 bo_gem->included_in_check_aperture = false;
2658 for (i = 0; i < bo_gem->reloc_count; i++)
2659 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2660 reloc_target_info[i].bo);
2664 * Return a conservative estimate for the amount of aperture required
2665 * for a collection of buffers. This may double-count some buffers.
2668 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2671 unsigned int total = 0;
2673 for (i = 0; i < count; i++) {
2674 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2676 total += bo_gem->reloc_tree_size;
2682 * Return the amount of aperture needed for a collection of buffers.
2683 * This avoids double counting any buffers, at the cost of looking
2684 * at every buffer in the set.
2687 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2690 unsigned int total = 0;
2692 for (i = 0; i < count; i++) {
2693 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2694 /* For the first buffer object in the array, we get an
2695 * accurate count back for its reloc_tree size (since nothing
2696 * had been flagged as being counted yet). We can save that
2697 * value out as a more conservative reloc_tree_size that
2698 * avoids double-counting target buffers. Since the first
2699 * buffer happens to usually be the batch buffer in our
2700 * callers, this can pull us back from doing the tree
2701 * walk on every new batch emit.
2704 drm_intel_bo_gem *bo_gem =
2705 (drm_intel_bo_gem *) bo_array[i];
2706 bo_gem->reloc_tree_size = total;
2710 for (i = 0; i < count; i++)
2711 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2716 * Return -1 if the batchbuffer should be flushed before attempting to
2717 * emit rendering referencing the buffers pointed to by bo_array.
2719 * This is required because if we try to emit a batchbuffer with relocations
2720 * to a tree of buffers that won't simultaneously fit in the aperture,
2721 * the rendering will return an error at a point where the software is not
2722 * prepared to recover from it.
2724 * However, we also want to emit the batchbuffer significantly before we reach
2725 * the limit, as a series of batchbuffers each of which references buffers
2726 * covering almost all of the aperture means that at each emit we end up
2727 * waiting to evict a buffer from the last rendering, and we get synchronous
2728 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2729 * get better parallelism.
2732 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2734 drm_intel_bufmgr_gem *bufmgr_gem =
2735 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2736 unsigned int total = 0;
2737 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2740 /* Check for fence reg constraints if necessary */
2741 if (bufmgr_gem->available_fences) {
2742 total_fences = drm_intel_gem_total_fences(bo_array, count);
2743 if (total_fences > bufmgr_gem->available_fences)
2747 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2749 if (total > threshold)
2750 total = drm_intel_gem_compute_batch_space(bo_array, count);
2752 if (total > threshold) {
2753 DBG("check_space: overflowed available aperture, "
2755 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2758 DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
2759 (int)bufmgr_gem->gtt_size / 1024);
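/*
 * Illustrative sketch of the intended calling pattern: before emitting
 * rendering that references a set of BOs, check whether they (plus the
 * batch) can be expected to fit in the aperture; if not, flush the current
 * batch first.  flush_batch() and the bo names are hypothetical caller-side
 * placeholders.
 *
 *     drm_intel_bo *bos[] = { batch_bo, src_bo, dst_bo };
 *     if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0) {
 *             flush_batch();   // submit what we have so far
 *             // ...then re-emit state into a fresh batch and retry...
 *     }
 */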
2765 * Disable buffer reuse for objects which are shared with the kernel
2766 * as scanout buffers
2769 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2771 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2773 bo_gem->reusable = false;
2778 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2780 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2782 return bo_gem->reusable;
2786 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2788 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2791 for (i = 0; i < bo_gem->reloc_count; i++) {
2792 if (bo_gem->reloc_target_info[i].bo == target_bo)
2794 if (bo == bo_gem->reloc_target_info[i].bo)
2796 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2804 /** Return true if target_bo is referenced by bo's relocation tree. */
2806 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2808 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2810 if (bo == NULL || target_bo == NULL)
2812 if (target_bo_gem->used_as_reloc_target)
2813 return _drm_intel_gem_bo_references(bo, target_bo);
2818 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2820 unsigned int i = bufmgr_gem->num_buckets;
2822 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2824 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2825 bufmgr_gem->cache_bucket[i].size = size;
2826 bufmgr_gem->num_buckets++;
2830 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2832 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2834 /* OK, so power of two buckets was too wasteful of memory.
2835 * Give 3 other sizes between each power of two, to hopefully
2836 * cover things accurately enough. (The alternative is
2837 * probably to just go for exact matching of sizes, and assume
2838 * that for things like composited window resize the tiled
2839 * width/height alignment and rounding of sizes to pages will
2840 * get us useful cache hit rates anyway)
2842 add_bucket(bufmgr_gem, 4096);
2843 add_bucket(bufmgr_gem, 4096 * 2);
2844 add_bucket(bufmgr_gem, 4096 * 3);
2846 /* Initialize the linked lists for BO reuse cache. */
2847 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2848 add_bucket(bufmgr_gem, size);
2850 add_bucket(bufmgr_gem, size + size * 1 / 4);
2851 add_bucket(bufmgr_gem, size + size * 2 / 4);
2852 add_bucket(bufmgr_gem, size + size * 3 / 4);
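/*
 * With the scheme above the bucket sizes work out to (illustrative):
 * 4KB, 8KB, 12KB, then for each power of two from 16KB up to 64MB the size
 * itself plus 1.25x, 1.5x and 1.75x of it (16, 20, 24, 28, 32, 40, 48, 56,
 * 64KB, ...), i.e. 3 + 13 * 4 = 55 buckets, which fits in the 14 * 4 entry
 * cache_bucket array.
 */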
2857 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2859 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2861 bufmgr_gem->vma_max = limit;
2863 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2867 * Get the PCI ID for the device. This can be overridden by setting the
2868 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2871 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2873 char *devid_override;
2876 drm_i915_getparam_t gp;
2878 if (geteuid() == getuid()) {
2879 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2880 if (devid_override) {
2881 bufmgr_gem->no_exec = true;
2882 return strtod(devid_override, NULL);
2887 gp.param = I915_PARAM_CHIPSET_ID;
2889 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2891 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2892 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
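/*
 * Illustrative use of the override (the device ID value is an example
 * only): running a client as if on a different device, e.g. for batch
 * capture.  Note that setting the override also sets no_exec, so batches
 * are never actually submitted to the hardware.
 *
 *     $ INTEL_DEVID_OVERRIDE=0x0412 ./client
 */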
2898 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2900 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2902 return bufmgr_gem->pci_device;
2906 * Sets the AUB filename.
2908 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
2909 * for it to have any effect.
2912 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
2913 const char *filename)
2918 * Sets up AUB dumping.
2920 * This is a trace file format that can be used with the simulator.
2921 * Packets are emitted in a format somewhat like GPU command packets.
2922 * You can set up a GTT and upload your objects into the referenced
2923 * space, then send off batchbuffers and get BMPs out the other end.
2926 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2928 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
2929 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
2930 "then run (for example)\n\n"
2931 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
2932 "See the intel_aubdump man page for more details.\n");
2936 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2938 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2939 struct drm_i915_gem_context_create create;
2940 drm_intel_context *context = NULL;
2943 context = calloc(1, sizeof(*context));
2948 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2950 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2956 context->ctx_id = create.ctx_id;
2957 context->bufmgr = bufmgr;
2963 drm_intel_gem_context_destroy(drm_intel_context *ctx)
2965 drm_intel_bufmgr_gem *bufmgr_gem;
2966 struct drm_i915_gem_context_destroy destroy;
2974 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
2975 destroy.ctx_id = ctx->ctx_id;
2976 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2979 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
2986 drm_intel_get_reset_stats(drm_intel_context *ctx,
2987 uint32_t *reset_count,
2991 drm_intel_bufmgr_gem *bufmgr_gem;
2992 struct drm_i915_reset_stats stats;
3000 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3001 stats.ctx_id = ctx->ctx_id;
3002 ret = drmIoctl(bufmgr_gem->fd,
3003 DRM_IOCTL_I915_GET_RESET_STATS,
3006 if (reset_count != NULL)
3007 *reset_count = stats.reset_count;
3010 *active = stats.batch_active;
3012 if (pending != NULL)
3013 *pending = stats.batch_pending;
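/*
 * Illustrative sketch: polling reset statistics for a context to detect
 * GPU hangs that affected it (e.g. for GL robustness extensions).  ctx is
 * a placeholder for a previously created context.
 *
 *     uint32_t resets, active, pending;
 *     if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *         (active != 0 || pending != 0)) {
 *             // this context lost work to a hang; recreate its resources
 *     }
 */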
3020 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3024 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3025 struct drm_i915_reg_read reg_read;
3029 reg_read.offset = offset;
3031 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3033 *result = reg_read.val;
3038 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3040 drm_i915_getparam_t gp;
3044 gp.value = (int*)subslice_total;
3045 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3046 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3054 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3056 drm_i915_getparam_t gp;
3060 gp.value = (int*)eu_total;
3061 gp.param = I915_PARAM_EU_TOTAL;
3062 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
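/*
 * Illustrative sketch: querying topology totals, used e.g. by compute
 * clients to size their dispatch.  A zero return indicates success.
 *
 *     unsigned int subslices, eus;
 *     if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
 *         drm_intel_get_eu_total(fd, &eus) == 0) {
 *             // subslices and eus now hold the kernel-reported totals
 *     }
 */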
3070 * Annotate the given bo for use in aub dumping.
3072 * \param annotations is an array of drm_intel_aub_annotation objects
3073 * describing the type of data in various sections of the bo. Each
3074 * element of the array specifies the type and subtype of a section of
3075 * the bo, and the past-the-end offset of that section. The elements
3076 * of \c annotations must be sorted so that ending_offset is increasing.
3079 * \param count is the number of elements in the \c annotations array.
3080 * If \c count is zero, then \c annotations will not be dereferenced.
3082 * Annotations are copied into a private data structure, so the caller may
3083 * re-use the memory pointed to by \c annotations after the call returns.
3086 * Annotations are stored for the lifetime of the bo; to reset to the
3087 * default state (no annotations), call this function with a \c count of zero.
3091 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3092 drm_intel_aub_annotation *annotations,
3097 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3098 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3100 static drm_intel_bufmgr_gem *
3101 drm_intel_bufmgr_gem_find(int fd)
3103 drm_intel_bufmgr_gem *bufmgr_gem;
3105 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3106 if (bufmgr_gem->fd == fd) {
3107 atomic_inc(&bufmgr_gem->refcount);
3116 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3118 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3120 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3121 pthread_mutex_lock(&bufmgr_list_mutex);
3123 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3124 DRMLISTDEL(&bufmgr_gem->managers);
3125 drm_intel_bufmgr_gem_destroy(bufmgr);
3128 pthread_mutex_unlock(&bufmgr_list_mutex);
3133 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3134 * and manage buffer objects.
3136 * \param fd File descriptor of the opened DRM device.
3139 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3141 drm_intel_bufmgr_gem *bufmgr_gem;
3142 struct drm_i915_gem_get_aperture aperture;
3143 drm_i915_getparam_t gp;
3147 pthread_mutex_lock(&bufmgr_list_mutex);
3149 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3153 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3154 if (bufmgr_gem == NULL)
3157 bufmgr_gem->fd = fd;
3158 atomic_set(&bufmgr_gem->refcount, 1);
3160 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3167 ret = drmIoctl(bufmgr_gem->fd,
3168 DRM_IOCTL_I915_GEM_GET_APERTURE,
3172 bufmgr_gem->gtt_size = aperture.aper_available_size;
3174 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3176 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3177 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3178 "May lead to reduced performance or incorrect "
3180 (int)bufmgr_gem->gtt_size / 1024);
3183 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3185 if (IS_GEN2(bufmgr_gem->pci_device))
3186 bufmgr_gem->gen = 2;
3187 else if (IS_GEN3(bufmgr_gem->pci_device))
3188 bufmgr_gem->gen = 3;
3189 else if (IS_GEN4(bufmgr_gem->pci_device))
3190 bufmgr_gem->gen = 4;
3191 else if (IS_GEN5(bufmgr_gem->pci_device))
3192 bufmgr_gem->gen = 5;
3193 else if (IS_GEN6(bufmgr_gem->pci_device))
3194 bufmgr_gem->gen = 6;
3195 else if (IS_GEN7(bufmgr_gem->pci_device))
3196 bufmgr_gem->gen = 7;
3197 else if (IS_GEN8(bufmgr_gem->pci_device))
3198 bufmgr_gem->gen = 8;
3199 else if (IS_GEN9(bufmgr_gem->pci_device))
3200 bufmgr_gem->gen = 9;
3207 if (IS_GEN3(bufmgr_gem->pci_device) &&
3208 bufmgr_gem->gtt_size > 256*1024*1024) {
3209 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3210 * be used for tiled blits. To simplify the accounting, just
3211 * subtract the unmappable part (fixed to 256MB on all known
3212 * gen3 devices) if the kernel advertises it. */
3213 bufmgr_gem->gtt_size -= 256*1024*1024;
3219 gp.param = I915_PARAM_HAS_EXECBUF2;
3220 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3224 gp.param = I915_PARAM_HAS_BSD;
3225 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3226 bufmgr_gem->has_bsd = ret == 0;
3228 gp.param = I915_PARAM_HAS_BLT;
3229 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3230 bufmgr_gem->has_blt = ret == 0;
3232 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3233 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3234 bufmgr_gem->has_relaxed_fencing = ret == 0;
3236 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3238 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3239 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3240 bufmgr_gem->has_wait_timeout = ret == 0;
3242 gp.param = I915_PARAM_HAS_LLC;
3243 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3245 /* Kernel does not support the HAS_LLC query; fall back to GPU
3246 * generation detection and assume that we have LLC on GEN6/7
3248 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3249 IS_GEN7(bufmgr_gem->pci_device));
3251 bufmgr_gem->has_llc = *gp.value;
3253 gp.param = I915_PARAM_HAS_VEBOX;
3254 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3255 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3257 if (bufmgr_gem->gen < 4) {
3258 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3259 gp.value = &bufmgr_gem->available_fences;
3260 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3262 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3264 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3266 bufmgr_gem->available_fences = 0;
3268 /* XXX The kernel reports the total number of fences,
3269 * including any that may be pinned.
3271 * We presume that there will be at least one pinned
3272 * fence for the scanout buffer, but there may be more
3273 * than one scanout and the user may be manually
3274 * pinning buffers. Let's move to execbuffer2 and
3275 * thereby forget the insanity of using fences...
3277 bufmgr_gem->available_fences -= 2;
3278 if (bufmgr_gem->available_fences < 0)
3279 bufmgr_gem->available_fences = 0;
3283 /* Let's go with one relocation for every 2 dwords (but round down a bit
3284 * since a power of two will mean an extra page allocation for the reloc buffer).
3287 * Every 4 was too few for the blender benchmark.
3289 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3291 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3292 bufmgr_gem->bufmgr.bo_alloc_for_render =
3293 drm_intel_gem_bo_alloc_for_render;
3294 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3295 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3296 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3297 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3298 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3299 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3300 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3301 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3302 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3303 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3304 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3305 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3306 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3307 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3308 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3309 /* Use the new one if available */
3311 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3312 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3314 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3315 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3316 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3317 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3318 bufmgr_gem->bufmgr.debug = 0;
3319 bufmgr_gem->bufmgr.check_aperture_space =
3320 drm_intel_gem_check_aperture_space;
3321 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3322 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3323 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3324 drm_intel_gem_get_pipe_from_crtc_id;
3325 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3327 DRMINITLISTHEAD(&bufmgr_gem->named);
3328 init_cache_buckets(bufmgr_gem);
3330 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3331 bufmgr_gem->vma_max = -1; /* unlimited by default */
3333 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3336 pthread_mutex_unlock(&bufmgr_list_mutex);
3338 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;